/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr,
				  uint32_t index,
				  uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
				       struct rte_ether_addr *mac_addr,
				       uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
					   uint8_t add);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
			      struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats,
				unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
			   const uint64_t *ids,
			   uint64_t *values,
			   unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
					struct rte_eth_pfc_conf *pfc_conf);
static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			       struct rte_eth_rxq_info *qinfo);
static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			       struct rte_eth_txq_info *qinfo);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var)			\
	{ _string,					\
	  offsetof(struct axgbe_mmc_stats, _var),	\
	}
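/*
 * Each extended statistic is described by its name plus the byte offset of
 * the corresponding counter inside struct axgbe_mmc_stats, so the xstats
 * get/reset paths can walk this table generically.  Illustrative sketch
 * (not driver code): the value of entry i is fetched as
 *
 *   value = *(uint64_t *)((uint8_t *)&pdata->mmc_stats +
 *                         axgbe_xstats_strings[i].offset);
 *
 * which is exactly what axgbe_dev_xstats_get() below does.
 */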
static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT	ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID		0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A	0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B	0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 229376,
	.rx_max_fifo_size = 229376,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};
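/*
 * Descriptor ring limits advertised via dev_infos_get: ring sizes must lie
 * within [AXGBE_MIN_RING_DESC, AXGBE_MAX_RING_DESC] and be a multiple of
 * eight descriptors.
 */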
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure = axgbe_dev_configure,
	.dev_start = axgbe_dev_start,
	.dev_stop = axgbe_dev_stop,
	.dev_close = axgbe_dev_close,
	.promiscuous_enable = axgbe_dev_promiscuous_enable,
	.promiscuous_disable = axgbe_dev_promiscuous_disable,
	.allmulticast_enable = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set = axgbe_dev_mac_addr_set,
	.mac_addr_add = axgbe_dev_mac_addr_add,
	.mac_addr_remove = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update = axgbe_dev_link_update,
	.get_reg = axgbe_dev_get_regs,
	.stats_get = axgbe_dev_stats_get,
	.stats_reset = axgbe_dev_stats_reset,
	.xstats_get = axgbe_dev_xstats_get,
	.xstats_reset = axgbe_dev_xstats_reset,
	.xstats_get_names = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id = axgbe_dev_xstats_get_by_id,
	.dev_infos_get = axgbe_dev_info_get,
	.rx_queue_setup = axgbe_dev_rx_queue_setup,
	.rx_queue_release = axgbe_dev_rx_queue_release,
	.tx_queue_setup = axgbe_dev_tx_queue_setup,
	.tx_queue_release = axgbe_dev_tx_queue_release,
	.flow_ctrl_get = axgbe_flow_ctrl_get,
	.flow_ctrl_set = axgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
	.rxq_info_get = axgbe_rxq_info_get,
	.txq_info_get = axgbe_txq_info_get,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			/* DMA channel 0 (Rx queue 0) raised the interrupt */
			dma_ch_isr =
			AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
					  pdata->rx_queues[0],
					  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device; currently this only records whether Rx checksum offload
 * to hardware was requested.  It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}
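/*
 * The AXGBE_STOPPED and AXGBE_DOWN bits in pdata->dev_state track the
 * start/stop state machine: dev_start clears both once Tx/Rx have been
 * enabled, while dev_stop sets AXGBE_STOPPED first (returning early if it
 * was already set) and sets AXGBE_DOWN once the hardware is quiesced.
 */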
/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Set Default MAC Addr */
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		       uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}

static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}

static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1; /* 0 is always default mac */
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
		return -EINVAL;
	}

	/* clear unicast addresses */
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}
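/*
 * Beyond the perfect-match address registers handled above (entry 0 always
 * holds the default MAC, entries 1..addn_mac are shared between unicast and
 * multicast entries), the MAC can hash-filter addresses: the two helpers
 * below maintain the MAC_HTR(n) hash table and turn the Hash Unicast (HUC)
 * and Hash or Perfect Filter (HPF) bits of MAC_PFR on or off accordingly.
 */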
static int
axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);

	if (pdata->uc_hash_mac_addr > 0) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

static int
axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	for (index = 0; index < pdata->hash_table_count; index++) {
		if (add)
			pdata->uc_hash_table[index] = ~0;
		else
			pdata->uc_hash_table[index] = 0;

		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
			    add ? "set" : "clear", index);

		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
			       pdata->uc_hash_table[index]);
	}

	if (add) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}
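/*
 * MMC counters are sampled with the counters frozen (MMC_CR.MCF) so the
 * LO/HI halves of each 64-bit counter are read consistently.  Every counter
 * is accumulated into its software mirror following the pattern
 *
 *   stats->ctr += AXGMAC_IOREAD(pdata, MMC_CTR_LO);
 *   stats->ctr += (uint64_t)AXGMAC_IOREAD(pdata, MMC_CTR_HI) << 32;
 *
 * where MMC_CTR_LO/HI stand in for the per-counter register names used
 * below (a few runt/jabber/size counters are single 32-bit registers).
 */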
static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);
	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}
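/*
 * Illustrative application-side usage (not driver code), assuming a probed
 * and started port `port_id`: first query the required array size, then
 * fetch the id/value pairs:
 *
 *   int n = rte_eth_xstats_get(port_id, NULL, 0);
 *   struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *   n = rte_eth_xstats_get(port_id, xs, n);
 *
 * At the driver level the ids assigned in axgbe_dev_xstats_get() index into
 * axgbe_xstats_strings[]; the ethdev layer additionally prepends its generic
 * stats to the application-visible list.
 */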
static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}

static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	/* Fill the whole copy so any valid id can be looked up below */
	axgbe_dev_xstats_get_names(dev, xstats_names_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}
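/*
 * Basic stats are assembled from the per-queue software counters maintained
 * by the Rx/Tx burst functions; only imissed comes from hardware, via the
 * MMC Rx FIFO overflow counter.
 */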
static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
		stats->ierrors += rxq->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->rx_mbuf_alloc_failed = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}

static int
axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;
	reg_val = AXGMAC_IOREAD(pdata, reg);
	fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
	fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
	fc.autoneg = pdata->pause_autoneg;

	if (pdata->rx_pause && pdata->tx_pause)
		fc.mode = RTE_FC_FULL;
	else if (pdata->rx_pause)
		fc.mode = RTE_FC_RX_PAUSE;
	else if (pdata->tx_pause)
		fc.mode = RTE_FC_TX_PAUSE;
	else
		fc.mode = RTE_FC_NONE;

	fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
	fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
	fc_conf->pause_time = fc.pause_time[0];
	fc_conf->send_xon = fc.send_xon;
	fc_conf->mode = fc.mode;

	return 0;
}
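/*
 * The RFA/RFD fields of MTL_Q_RQFCR hold the flow-control activate and
 * deactivate thresholds in 512-byte granularity (hence the << 9 conversion
 * in axgbe_flow_ctrl_get() above), while rte_eth_fc_conf expresses the
 * watermarks in kilobytes; AXGMAC_FLOW_CONTROL_VALUE() performs the reverse
 * mapping when the thresholds are programmed below.
 */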
static int
axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;

	pdata->pause_autoneg = fc_conf->autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = fc_conf->send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
	AXGMAC_IOWRITE(pdata, reg, reg_val);
	fc.mode = fc_conf->mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static int
axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
			     struct rte_eth_pfc_conf *pfc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	uint8_t tc_num;

	/* Validate the priority before using it to index pfc_map */
	if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
		PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n",
			     pdata->hw_feat.tc_cnt);
		return -EINVAL;
	}

	tc_num = pdata->pfc_map[pfc_conf->priority];

	pdata->pause_autoneg = pfc_conf->fc.autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = pfc_conf->fc.send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));

	/* MTL_TCPM0R holds the pause time for TCs 0-3, MTL_TCPM1R for 4-7 */
	switch (tc_num) {
	case 0:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC0, pfc_conf->fc.pause_time);
		break;
	case 1:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC1, pfc_conf->fc.pause_time);
		break;
	case 2:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC2, pfc_conf->fc.pause_time);
		break;
	case 3:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC3, pfc_conf->fc.pause_time);
		break;
	case 4:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC4, pfc_conf->fc.pause_time);
		break;
	case 5:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC5, pfc_conf->fc.pause_time);
		break;
	case 6:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC6, pfc_conf->fc.pause_time);
		break;
	case 7:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC7, pfc_conf->fc.pause_time);
		break;
	}

	fc.mode = pfc_conf->fc.mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}
static void
axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		   struct rte_eth_rxq_info *qinfo)
{
	struct axgbe_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];
	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_desc;
	qinfo->conf.rx_free_thresh = rxq->free_thresh;
}

static void
axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		   struct rte_eth_txq_info *qinfo)
{
	struct axgbe_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];
	qinfo->nb_desc = txq->nb_desc;
	qinfo->conf.tx_free_thresh = txq->free_thresh;
}
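/*
 * Hardware feature discovery: MAC_HWF0R/1R/2R describe what this IP
 * instantiation supports.  Several fields are encoded rather than literal,
 * e.g. the FIFO sizes are log2-encoded (size = 1 << (field + 7) bytes) and
 * the queue/channel/TC counts are zero-based; the translations are applied
 * at the end of axgbe_get_all_hw_features().
 */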
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}
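/*
 * Default configuration favours store-and-forward operation: Tx/Rx
 * store-and-forward mode in the MTL, a DMA programmable burst length (PBL)
 * of 32 with x8 mode, and pause autonegotiation enabled with both pause
 * directions initially off.
 */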
static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
	       (pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}

/*
 * Initialise an axgbe ethdev: map BARs, discover hardware features, read
 * the permanent MAC address and register the interrupt handler.
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* Version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;
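	/*
	 * XPCS registers are reached through a windowed (indirect) scheme:
	 * a write to the window-select register picks which block of PCS
	 * address space is visible in the memory BAR, and the low bits of
	 * the target address index into that window.  Rough sketch of a
	 * read, assuming the XPCS16_IOREAD/XPCS32_IOWRITE helpers from
	 * axgbe_common.h:
	 *
	 *   XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, window_index);
	 *   val = XPCS16_IOREAD(pdata, pdata->xpcs_window +
	 *                       (addr & pdata->xpcs_window_mask));
	 *
	 * (sketch only; the real accessors also fold in the MMD being
	 * addressed).
	 */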
	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to "
			     "store MAC addresses", len);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);

	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}
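/*
 * Illustrative probe flow (not additional driver code): once the device is
 * bound to one of the kernel modules listed in RTE_PMD_REGISTER_KMOD_DEP
 * below, the PCI bus scan matches it against pci_id_axgbe_map and
 * rte_eth_dev_pci_generic_probe() allocates the ethdev with
 * sizeof(struct axgbe_port) of private data before calling
 * eth_axgbe_dev_init() above.
 */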
static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct axgbe_port),
					     eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}