/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
		uint8_t add);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *stats,
		unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values,
		unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		const uint64_t *ids,
		unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var) \
	{ _string, \
	  offsetof(struct axgbe_mmc_stats, _var), \
	}

static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID		0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A	0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B	0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 229376,
	.rx_max_fifo_size = 229376,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

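/* ethdev callbacks exported by this PMD (installed on the device in
 * eth_axgbe_dev_init() and axgbe_dev_start())
 */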
static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure = axgbe_dev_configure,
	.dev_start = axgbe_dev_start,
	.dev_stop = axgbe_dev_stop,
	.dev_close = axgbe_dev_close,
	.promiscuous_enable = axgbe_dev_promiscuous_enable,
	.promiscuous_disable = axgbe_dev_promiscuous_disable,
	.allmulticast_enable = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set = axgbe_dev_mac_addr_set,
	.mac_addr_add = axgbe_dev_mac_addr_add,
	.mac_addr_remove = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update = axgbe_dev_link_update,
	.get_reg = axgbe_dev_get_regs,
	.stats_get = axgbe_dev_stats_get,
	.stats_reset = axgbe_dev_stats_reset,
	.xstats_get = axgbe_dev_xstats_get,
	.xstats_reset = axgbe_dev_xstats_reset,
	.xstats_get_names = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id = axgbe_dev_xstats_get_by_id,
	.dev_infos_get = axgbe_dev_info_get,
	.rx_queue_setup = axgbe_dev_rx_queue_setup,
	.rx_queue_release = axgbe_dev_rx_queue_release,
	.tx_queue_setup = axgbe_dev_tx_queue_setup,
	.tx_queue_release = axgbe_dev_tx_queue_release,
	.flow_ctrl_get = axgbe_flow_ctrl_get,
	.flow_ctrl_set = axgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Set Default MAC Addr */
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		       uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}

static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}

static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1; /* 0 is always default mac */
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
		return -EINVAL;
	}

	/* clear unicast addresses */
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}

static int
axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);

	if (pdata->uc_hash_mac_addr > 0) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

static int
axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	for (index = 0; index < pdata->hash_table_count; index++) {
		if (add)
			pdata->uc_hash_table[index] = ~0;
		else
			pdata->uc_hash_table[index] = 0;

		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
			    add ? "set" : "clear", index);

		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
			       pdata->uc_hash_table[index]);
	}

	if (add) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}

/* Accumulate the hardware MMC statistics: the MMC block is frozen
 * (MMC_CR.MCF) for the duration of the read, and most counters are
 * assembled from their LO/HI register pairs into 64-bit software counters.
 */
static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}
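
/* xstats helpers below: names are taken from axgbe_xstats_strings[], and the
 * by-ID variants validate each requested ID against AXGBE_XSTATS_COUNT
 * before copying values or names.
 */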
static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}

static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}

static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
		stats->ierrors += rxq->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->rx_mbuf_alloc_failed = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}

static int
axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;
	reg_val = AXGMAC_IOREAD(pdata, reg);
	fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
	fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
	fc.autoneg = pdata->pause_autoneg;

	if (pdata->rx_pause && pdata->tx_pause)
		fc.mode = RTE_FC_FULL;
	else if (pdata->rx_pause)
		fc.mode = RTE_FC_RX_PAUSE;
	else if (pdata->tx_pause)
		fc.mode = RTE_FC_TX_PAUSE;
	else
		fc.mode = RTE_FC_NONE;

	fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
	fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
	fc_conf->pause_time = fc.pause_time[0];
	fc_conf->send_xon = fc.send_xon;
	fc_conf->mode = fc.mode;

	return 0;
}

static int
axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;
	reg = MAC_Q0TFCR;

	pdata->pause_autoneg = fc_conf->autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = fc_conf->send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
	AXGMAC_IOWRITE(pdata, reg, reg_val);
	fc.mode = fc_conf->mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static int
axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
			     struct rte_eth_pfc_conf *pfc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	uint8_t tc_num;

	tc_num = pdata->pfc_map[pfc_conf->priority];

	if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
		PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n",
			     pdata->hw_feat.tc_cnt);
		return -EINVAL;
	}

	pdata->pause_autoneg = pfc_conf->fc.autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = pfc_conf->fc.send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));

	/* Program the pause time for the traffic class: PSTCn holds the
	 * pause time for TC n (PSTC0-3 in MTL_TCPM0R, PSTC4-7 in MTL_TCPM1R).
	 */
	switch (tc_num) {
	case 0:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC0, pfc_conf->fc.pause_time);
		break;
	case 1:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC1, pfc_conf->fc.pause_time);
		break;
	case 2:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC2, pfc_conf->fc.pause_time);
		break;
	case 3:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				    PSTC3, pfc_conf->fc.pause_time);
		break;
	case 4:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC4, pfc_conf->fc.pause_time);
		break;
	case 5:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC5, pfc_conf->fc.pause_time);
		break;
	case 6:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC6, pfc_conf->fc.pause_time);
		break;
	case 7:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				    PSTC7, pfc_conf->fc.pause_time);
		break;
	}

	fc.mode = pfc_conf->fc.mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *  the number of Tx queues to the number of Tx channels
	 *  enabled
	 * -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *  number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}

/*
 * Initialise the port private data, retrieve the MAC address and register
 * the interrupt handler.
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}