/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr,
				  uint32_t index,
				  uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
				       struct rte_ether_addr *mac_addr,
				       uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
					   uint8_t add);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
			      struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats,
				unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
			   const uint64_t *ids,
			   uint64_t *values,
			   unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_reta_entry64 *reta_conf,
				     uint16_t reta_size);
static int axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf);
static int axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf);
static int axgbe_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
					struct rte_eth_pfc_conf *pfc_conf);
static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			       struct rte_eth_rxq_info *qinfo);
static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			       struct rte_eth_txq_info *qinfo);
const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var)				\
	{ _string,						\
	  offsetof(struct axgbe_mmc_stats, _var),		\
	}

static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID		0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A	0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B	0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};
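/*
 * Version-specific driver data for the two supported device IDs. Both
 * parts share the V2 PHY implementation and XPCS access method; the
 * V2A device exposes larger Tx/Rx FIFOs (224 KB) than V2B (64 KB).
 */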
static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 229376,
	.rx_max_fifo_size = 229376,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure = axgbe_dev_configure,
	.dev_start = axgbe_dev_start,
	.dev_stop = axgbe_dev_stop,
	.dev_close = axgbe_dev_close,
	.promiscuous_enable = axgbe_dev_promiscuous_enable,
	.promiscuous_disable = axgbe_dev_promiscuous_disable,
	.allmulticast_enable = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set = axgbe_dev_mac_addr_set,
	.mac_addr_add = axgbe_dev_mac_addr_add,
	.mac_addr_remove = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update = axgbe_dev_link_update,
	.get_reg = axgbe_dev_get_regs,
	.stats_get = axgbe_dev_stats_get,
	.stats_reset = axgbe_dev_stats_reset,
	.xstats_get = axgbe_dev_xstats_get,
	.xstats_reset = axgbe_dev_xstats_reset,
	.xstats_get_names = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id = axgbe_dev_xstats_get_by_id,
	.reta_update = axgbe_dev_rss_reta_update,
	.reta_query = axgbe_dev_rss_reta_query,
	.rss_hash_update = axgbe_dev_rss_hash_update,
	.rss_hash_conf_get = axgbe_dev_rss_hash_conf_get,
	.dev_infos_get = axgbe_dev_info_get,
	.rx_queue_setup = axgbe_dev_rx_queue_setup,
	.rx_queue_release = axgbe_dev_rx_queue_release,
	.tx_queue_setup = axgbe_dev_tx_queue_setup,
	.tx_queue_release = axgbe_dev_tx_queue_release,
	.flow_ctrl_get = axgbe_flow_ctrl_get,
	.flow_ctrl_set = axgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
	.rxq_info_get = axgbe_rxq_info_get,
	.txq_info_get = axgbe_txq_info_get,
	.dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get,
	.rx_descriptor_status = axgbe_dev_rx_descriptor_status,
	.tx_descriptor_status = axgbe_dev_tx_descriptor_status,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
			AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
					  pdata->rx_queues[0],
					  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}
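/*
 * Note: axgbe_dev_start() above selects the scattered Rx burst handler
 * whenever a single Rx buffer cannot hold the largest permitted frame
 * (max_rx_pkt_len > rx_buf_size) or when the application explicitly
 * requests DEV_RX_OFFLOAD_SCATTER.
 */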
/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Set Default MAC Addr */
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		       uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}

static int
axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i, idx, shift;
	int ret;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
			continue;
		pdata->rss_table[i] = reta_conf[idx].reta[shift];
	}

	/* Program the lookup table */
	ret = axgbe_write_rss_lookup_table(pdata);
	return ret;
}
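/*
 * RETA entries are exchanged in groups of RTE_RETA_GROUP_SIZE (64):
 * entry i lives in reta_conf[i / 64].reta[i % 64] and is only touched
 * when the matching bit of reta_conf[i / 64].mask is set. For example,
 * RETA entry 70 maps to reta_conf[1].reta[6].
 */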
static int
axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i, idx, shift;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
			continue;
		reta_conf[idx].reta[shift] = pdata->rss_table[i];
	}
	return 0;
}

static int
axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (rss_conf == NULL) {
		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
		return -EINVAL;
	}

	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len == AXGBE_RSS_HASH_KEY_SIZE) {
		rte_memcpy(pdata->rss_key, rss_conf->rss_key,
			   AXGBE_RSS_HASH_KEY_SIZE);
		/* Program the hash key */
		ret = axgbe_write_rss_hash_key(pdata);
		if (ret != 0)
			return ret;
	}

	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;

	if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	if (pdata->rss_hf &
	    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	if (pdata->rss_hf &
	    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Set the RSS options */
	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	return 0;
}

static int
axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (rss_conf == NULL) {
		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
		return -EINVAL;
	}

	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len >= AXGBE_RSS_HASH_KEY_SIZE) {
		rte_memcpy(rss_conf->rss_key, pdata->rss_key,
			   AXGBE_RSS_HASH_KEY_SIZE);
	}
	rss_conf->rss_key_len = AXGBE_RSS_HASH_KEY_SIZE;
	rss_conf->rss_hf = pdata->rss_hf;
	return 0;
}

static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}
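/*
 * Additional MAC address filters: entry 0 always holds the primary
 * station address, so multicast list entries and extra unicast
 * addresses are programmed from index 1 upwards. hw_feat.addn_mac
 * gives the number of extra filter entries the MAC provides.
 */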
static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1; /* 0 is always default mac */
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
		return -EINVAL;
	}

	/* clear unicast addresses */
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}

static int
axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);

	if (pdata->uc_hash_mac_addr > 0) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

static int
axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	for (index = 0; index < pdata->hash_table_count; index++) {
		if (add)
			pdata->uc_hash_table[index] = ~0;
		else
			pdata->uc_hash_table[index] = 0;

		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
			    add ? "set" : "clear", index);

		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
			       pdata->uc_hash_table[index]);
	}

	if (add) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}
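/*
 * MMC counters wider than 32 bits are exposed as _LO/_HI register
 * pairs. The counters are frozen (MMC_CR.MCF) while being read so both
 * halves of each 64-bit value are sampled consistently, then unfrozen
 * again. Reads accumulate into the software copy because the hardware
 * registers are configured to reset on read (see axgbe_dev_xstats_reset).
 */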
static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);
	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);
	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				 axgbe_xstats_strings[i].offset);
	}

	return i;
}

static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}

static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}
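/*
 * The extended stats above expose the raw MMC counters by name. A
 * minimal application-side sketch of retrieving them (hypothetical
 * port_id, standard ethdev API):
 *
 *	struct rte_eth_xstat xstats[64];
 *	int n = rte_eth_xstats_get(port_id, xstats, 64);
 *
 * The basic stats below combine software per-queue counters maintained
 * in the Rx/Tx burst functions with the hardware rxfifooverflow count,
 * which is reported as imissed.
 */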
static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
		stats->ierrors += rxq->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->rx_mbuf_alloc_failed = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}
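/*
 * The RFA/RFD fields used below appear to hold the flow-control
 * watermarks in 512-byte units with a 1 KB bias (hence the << 9 and the
 * added 1024); the conversions translate between those units and the
 * KB-based rte_eth_fc_conf fields.
 */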
static int
axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;
	reg_val = AXGMAC_IOREAD(pdata, reg);
	fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
	fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
	fc.autoneg = pdata->pause_autoneg;

	if (pdata->rx_pause && pdata->tx_pause)
		fc.mode = RTE_FC_FULL;
	else if (pdata->rx_pause)
		fc.mode = RTE_FC_RX_PAUSE;
	else if (pdata->tx_pause)
		fc.mode = RTE_FC_TX_PAUSE;
	else
		fc.mode = RTE_FC_NONE;

	fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
	fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
	fc_conf->pause_time = fc.pause_time[0];
	fc_conf->send_xon = fc.send_xon;
	fc_conf->mode = fc.mode;

	return 0;
}

static int
axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;

	pdata->pause_autoneg = fc_conf->autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = fc_conf->send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
	AXGMAC_IOWRITE(pdata, reg, reg_val);
	fc.mode = fc_conf->mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static int
axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
			     struct rte_eth_pfc_conf *pfc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	uint8_t tc_num;

	/* Validate the priority before using it to index pfc_map */
	if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
		PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n",
			     pdata->hw_feat.tc_cnt);
		return -EINVAL;
	}

	tc_num = pdata->pfc_map[pfc_conf->priority];

	pdata->pause_autoneg = pfc_conf->fc.autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = pfc_conf->fc.send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));

	switch (tc_num) {
	case 0:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC0, pfc_conf->fc.pause_time);
		break;
	case 1:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC1, pfc_conf->fc.pause_time);
		break;
	case 2:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC2, pfc_conf->fc.pause_time);
		break;
	case 3:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC3, pfc_conf->fc.pause_time);
		break;
	case 4:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC4, pfc_conf->fc.pause_time);
		break;
	case 5:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC5, pfc_conf->fc.pause_time);
		break;
	case 6:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC6, pfc_conf->fc.pause_time);
		break;
	case 7:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC7, pfc_conf->fc.pause_time);
		break;
	}
	fc.mode = pfc_conf->fc.mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);
	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

void
axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		   struct rte_eth_rxq_info *qinfo)
{
	struct axgbe_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];
	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_desc;
	qinfo->conf.rx_free_thresh = rxq->free_thresh;
}

void
axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		   struct rte_eth_txq_info *qinfo)
{
	struct axgbe_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];
	qinfo->nb_desc = txq->nb_desc;
	qinfo->conf.tx_free_thresh = txq->free_thresh;
}

const uint32_t *
axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == axgbe_recv_pkts)
		return ptypes;
	return NULL;
}
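/*
 * Hardware capabilities are discovered from the three MAC_HWFxR feature
 * registers in axgbe_get_all_hw_features() below. Several fields (hash
 * table size, DMA address width, FIFO sizes, queue/channel/TC counts)
 * are encoded and are translated into actual values at the end of that
 * function.
 */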
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}
static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *  the number of Tx queues to the number of Tx channels
	 *  enabled
	 * -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *  number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
	       (pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}
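/*
 * pci_search_device() above is used during init to look for the RV root
 * complex device ID on the PCI bus: its presence tells the RV variant of
 * the MAC apart from the SNOWY one, and the two use different PCS
 * indirect-access window registers.
 */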
/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct axgbe_port),
					     eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}
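/*
 * PMD registration: the driver binds to the device IDs listed in
 * pci_id_axgbe_map and, as declared below, expects the device to be
 * bound to igb_uio, uio_pci_generic or vfio-pci for BAR mapping and
 * interrupt delivery.
 */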
static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}