/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
		uint8_t add);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *stats,
		unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values,
		unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		const uint64_t *ids,
		unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int axgbe_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf);
static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo);
static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo);
const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var) \
	{ _string, \
	  offsetof(struct axgbe_mmc_stats, _var), \
	}

static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID		0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A	0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B	0x1459

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 229376,
	.rx_max_fifo_size = 229376,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure = axgbe_dev_configure,
	.dev_start = axgbe_dev_start,
	.dev_stop = axgbe_dev_stop,
	.dev_close = axgbe_dev_close,
	.promiscuous_enable = axgbe_dev_promiscuous_enable,
	.promiscuous_disable = axgbe_dev_promiscuous_disable,
	.allmulticast_enable = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set = axgbe_dev_mac_addr_set,
	.mac_addr_add = axgbe_dev_mac_addr_add,
	.mac_addr_remove = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update = axgbe_dev_link_update,
	.get_reg = axgbe_dev_get_regs,
	.stats_get = axgbe_dev_stats_get,
	.stats_reset = axgbe_dev_stats_reset,
	.xstats_get = axgbe_dev_xstats_get,
	.xstats_reset = axgbe_dev_xstats_reset,
	.xstats_get_names = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id = axgbe_dev_xstats_get_by_id,
	.reta_update = axgbe_dev_rss_reta_update,
	.reta_query = axgbe_dev_rss_reta_query,
	.rss_hash_update = axgbe_dev_rss_hash_update,
	.rss_hash_conf_get = axgbe_dev_rss_hash_conf_get,
	.dev_infos_get = axgbe_dev_info_get,
	.rx_queue_setup = axgbe_dev_rx_queue_setup,
	.rx_queue_release = axgbe_dev_rx_queue_release,
	.tx_queue_setup = axgbe_dev_tx_queue_setup,
	.tx_queue_release = axgbe_dev_tx_queue_release,
	.flow_ctrl_get = axgbe_flow_ctrl_get,
	.flow_ctrl_set = axgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
	.rxq_info_get = axgbe_rxq_info_get,
	.txq_info_get = axgbe_txq_info_get,
	.dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get,
	.mtu_set = axgb_mtu_set,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}
/* Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (rte_bit_relaxed_get32(AXGBE_STOPPED, &pdata->dev_state))
		return;

	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Set Default MAC Addr */
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		       uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}
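/*
 * Note on the RETA layout used by the update/query helpers below: the
 * redirection table is presented to applications as groups of
 * RTE_RETA_GROUP_SIZE (64) entries, so table entry i lives at
 * reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] and is
 * only touched when the matching bit in that group's mask is set.
 * For example (illustrative only), entry 70 maps to reta_conf[1].reta[6].
 */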
static int
axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i, idx, shift;
	int ret;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
			continue;
		pdata->rss_table[i] = reta_conf[idx].reta[shift];
	}

	/* Program the lookup table */
	ret = axgbe_write_rss_lookup_table(pdata);
	return ret;
}

static int
axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i, idx, shift;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
			continue;
		reta_conf[idx].reta[shift] = pdata->rss_table[i];
	}
	return 0;
}

static int
axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (rss_conf == NULL) {
		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
		return -EINVAL;
	}

	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len == AXGBE_RSS_HASH_KEY_SIZE) {
		rte_memcpy(pdata->rss_key, rss_conf->rss_key,
			   AXGBE_RSS_HASH_KEY_SIZE);
		/* Program the hash key */
		ret = axgbe_write_rss_hash_key(pdata);
		if (ret != 0)
			return ret;
	}

	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;

	if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	if (pdata->rss_hf &
	    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	if (pdata->rss_hf &
	    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Set the RSS options */
	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	return 0;
}

static int
axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (rss_conf == NULL) {
		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
		return -EINVAL;
	}

	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len >= AXGBE_RSS_HASH_KEY_SIZE) {
		rte_memcpy(rss_conf->rss_key, pdata->rss_key,
			   AXGBE_RSS_HASH_KEY_SIZE);
	}
	rss_conf->rss_key_len = AXGBE_RSS_HASH_KEY_SIZE;
	rss_conf->rss_hf = pdata->rss_hf;
	return 0;
}
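/*
 * Illustrative application-side usage of the hash-update path above (a
 * sketch, not part of the driver): the key is only programmed when its
 * length matches AXGBE_RSS_HASH_KEY_SIZE, and rss_hf is masked against
 * AXGBE_RSS_OFFLOAD before the MAC_RSSCR options are written.
 *
 *	uint8_t key[AXGBE_RSS_HASH_KEY_SIZE] = { initial key bytes };
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = AXGBE_RSS_HASH_KEY_SIZE,
 *		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */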
static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}

static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1; /* 0 is always default mac */
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
		return -EINVAL;
	}

	/* clear unicast addresses */
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}

static int
axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);

	if (pdata->uc_hash_mac_addr > 0) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

static int
axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	for (index = 0; index < pdata->hash_table_count; index++) {
		if (add)
			pdata->uc_hash_table[index] = ~0;
		else
			pdata->uc_hash_table[index] = 0;

		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
			    add ? "set" : "clear", index);

		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
			       pdata->uc_hash_table[index]);
	}

	if (add) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}

static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;
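	/*
	 * Each MMC counter below is 64 bits wide in software but is exposed
	 * by the MAC as a 32-bit LO/HI register pair, so the accumulation
	 * pattern is (values illustrative only):
	 *
	 *	LO read = 0x00001000, HI read = 0x2
	 *	counter += 0x00001000;
	 *	counter += (uint64_t)0x2 << 32;    adds 0x200001000 in total
	 *
	 * The counters are frozen while being read so the LO/HI halves stay
	 * consistent.
	 */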
	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);
	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);
	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}

static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}

static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}
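/*
 * The basic stats below are assembled from two sources: per-queue software
 * counters kept by the Rx/Tx paths (packets, bytes, errors, mbuf allocation
 * failures) and the MMC rx_fifo_overflow counter, which is reported as
 * imissed. A rough sketch of what a caller sees (illustrative only):
 *
 *	struct rte_eth_stats st;
 *	rte_eth_stats_get(port_id, &st);
 *	st.ipackets and st.ibytes are the sums of rxq->pkts and rxq->bytes
 *	over all configured Rx queues.
 */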
static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
		stats->ierrors += rxq->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->rx_mbuf_alloc_failed = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}
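/*
 * The flow-control get/set helpers below convert between the KB units used
 * by rte_eth_fc_conf and the MTL RQFCR RFA/RFD fields, which appear to be
 * held in 512-byte granularity. As a worked example (illustrative only), a
 * register value of 6 reads back as (1024 + (6 << 9)) / 1024 = 4 KB, and a
 * 4 KB request is scaled on write by passing the byte count,
 * AXGMAC_FLOW_CONTROL_VALUE(1024 * 4), to the register field.
 */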
static int
axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;
	reg_val = AXGMAC_IOREAD(pdata, reg);
	fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
	fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
	fc.autoneg = pdata->pause_autoneg;

	if (pdata->rx_pause && pdata->tx_pause)
		fc.mode = RTE_FC_FULL;
	else if (pdata->rx_pause)
		fc.mode = RTE_FC_RX_PAUSE;
	else if (pdata->tx_pause)
		fc.mode = RTE_FC_TX_PAUSE;
	else
		fc.mode = RTE_FC_NONE;

	fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
	fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
	fc_conf->pause_time = fc.pause_time[0];
	fc_conf->send_xon = fc.send_xon;
	fc_conf->mode = fc.mode;

	return 0;
}

static int
axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;

	pdata->pause_autoneg = fc_conf->autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = fc_conf->send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
	AXGMAC_IOWRITE(pdata, reg, reg_val);
	fc.mode = fc_conf->mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static int
axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
			     struct rte_eth_pfc_conf *pfc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	uint8_t tc_num;

	/* Validate the priority before using it to index pfc_map */
	if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
		PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n",
			     pdata->hw_feat.tc_cnt);
		return -EINVAL;
	}

	tc_num = pdata->pfc_map[pfc_conf->priority];

	pdata->pause_autoneg = pfc_conf->fc.autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = pfc_conf->fc.send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));

	switch (tc_num) {
	case 0:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC0, pfc_conf->fc.pause_time);
		break;
	case 1:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC1, pfc_conf->fc.pause_time);
		break;
	case 2:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC2, pfc_conf->fc.pause_time);
		break;
	case 3:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC3, pfc_conf->fc.pause_time);
		break;
	case 4:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC4, pfc_conf->fc.pause_time);
		break;
	case 5:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC5, pfc_conf->fc.pause_time);
		break;
	case 7:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC6, pfc_conf->fc.pause_time);
		break;
	case 6:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC7, pfc_conf->fc.pause_time);
		break;
	}

	fc.mode = pfc_conf->fc.mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);
	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

void
axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		   struct rte_eth_rxq_info *qinfo)
{
	struct axgbe_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];
	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_desc;
	qinfo->conf.rx_free_thresh = rxq->free_thresh;
}

void
axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		   struct rte_eth_txq_info *qinfo)
{
	struct axgbe_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];
	qinfo->nb_desc = txq->nb_desc;
	qinfo->conf.tx_free_thresh = txq->free_thresh;
}

const uint32_t *
axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == axgbe_recv_pkts)
		return ptypes;
	return NULL;
}
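/*
 * The MTU handler below works on the full frame size: frame_size =
 * mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN, e.g. a 1500 byte MTU gives a
 * 1518 byte frame (illustrative arithmetic only). Frames larger than
 * RTE_ETHER_MAX_LEN (1518) turn on the jumbo frame offload and set the
 * MAC_RCR JE bit.
 */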
static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	struct axgbe_port *pdata = dev->data->dev_private;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	unsigned int val = 0;

	axgbe_dev_info_get(dev, &dev_info);
	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;
	/* mtu setting is forbidden if port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
			    dev->data->port_id);
		return -EBUSY;
	}
	if (frame_size > RTE_ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		val = 1;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		val = 0;
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	return 0;
}

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}
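/*
 * Descriptive note: pci_search_device() walks the "pci" bus with
 * pci_device_cmp() as the match callback and is used below to probe for a
 * given AMD device ID, e.g. pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)
 * returns true when a Raven root complex is present so the matching PCS
 * window registers can be selected.
 */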
/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = axgbe_dev_tx_descriptor_status;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
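	/*
	 * Worked example of the window setup above (values illustrative
	 * only): if the WINDOW_DEF register reports OFFSET = 0x30 and
	 * SIZE = 3, then xpcs_window = 0x30 << 6 = 0xc00,
	 * xpcs_window_size = 1 << (3 + 7) = 1024 bytes and
	 * xpcs_window_mask = 0x3ff. Indirect XPCS accesses then select a
	 * window through xpcs_window_sel_reg and address within it using
	 * the mask.
	 */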
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER(axgbe_logtype_init, pmd.net.axgbe.init, NOTICE);
RTE_LOG_REGISTER(axgbe_logtype_driver, pmd.net.axgbe.driver, NOTICE);