/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index,
		uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
		uint8_t add);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *stats,
		unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values,
		unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		const uint64_t *ids,
		unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf);
static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo);
static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo);
const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var) \
	{ _string, \
	  offsetof(struct axgbe_mmc_stats, _var), \
	}

static const struct axgbe_xstats
axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings)
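
/*
 * Note: each entry above pairs a display name with the byte offset of the
 * matching counter inside struct axgbe_mmc_stats, so the xstats get/reset
 * handlers can walk the table generically, e.g. (illustrative only):
 *
 *	uint64_t v = *(uint64_t *)((uint8_t *)&pdata->mmc_stats +
 *				   axgbe_xstats_strings[i].offset);
 */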

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID		0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A	0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B	0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 229376,
	.rx_max_fifo_size = 229376,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure = axgbe_dev_configure,
	.dev_start = axgbe_dev_start,
	.dev_stop = axgbe_dev_stop,
	.dev_close = axgbe_dev_close,
	.promiscuous_enable = axgbe_dev_promiscuous_enable,
	.promiscuous_disable = axgbe_dev_promiscuous_disable,
	.allmulticast_enable = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set = axgbe_dev_mac_addr_set,
	.mac_addr_add = axgbe_dev_mac_addr_add,
	.mac_addr_remove = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update = axgbe_dev_link_update,
	.get_reg = axgbe_dev_get_regs,
	.stats_get = axgbe_dev_stats_get,
	.stats_reset = axgbe_dev_stats_reset,
	.xstats_get = axgbe_dev_xstats_get,
	.xstats_reset = axgbe_dev_xstats_reset,
	.xstats_get_names = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id = axgbe_dev_xstats_get_by_id,
	.dev_infos_get = axgbe_dev_info_get,
	.rx_queue_setup = axgbe_dev_rx_queue_setup,
	.rx_queue_release = axgbe_dev_rx_queue_release,
	.tx_queue_setup = axgbe_dev_tx_queue_setup,
	.tx_queue_release = axgbe_dev_tx_queue_release,
	.flow_ctrl_get = axgbe_flow_ctrl_get,
	.flow_ctrl_set = axgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
	.rxq_info_get = axgbe_rxq_info_get,
	.txq_info_get = axgbe_txq_info_get,
	.dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get,
	.rx_descriptor_status = axgbe_dev_rx_descriptor_status,
	.tx_descriptor_status = axgbe_dev_tx_descriptor_status,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/*DMA related interrupts*/
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}
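
/*
 * Usage sketch (application side, standard ethdev API; port/queue counts are
 * illustrative): requesting the checksum offloads at configure time is what
 * sets pdata->rx_csum_enable above, e.g.:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .offloads = DEV_RX_OFFLOAD_CHECKSUM, },
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */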

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start*/
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues.
 */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Set Default MAC Addr */
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		       uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}

static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}

static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1; /* 0 is always default mac */
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
		return -EINVAL;
	}

	/* clear unicast addresses */
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}

static int
axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);

	if (pdata->uc_hash_mac_addr > 0) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

static int
axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	for (index = 0; index < pdata->hash_table_count; index++) {
		if (add)
			pdata->uc_hash_table[index] = ~0;
		else
			pdata->uc_hash_table[index] = 0;

		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
			    add ? "set" : "clear", index);

		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
			       pdata->uc_hash_table[index]);
	}

	if (add) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}
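
/*
 * Read (and accumulate) the hardware MMC counters into pdata->mmc_stats.
 * Each 64-bit counter is assembled from a _LO/_HI register pair, and the
 * counters are frozen via MMC_CR.MCF for the duration of the read.
 */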
static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}

static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}

static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}
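
/*
 * Usage sketch (application side, standard ethdev xstats API; names, sizes
 * and error handling are illustrative):
 *
 *	struct rte_eth_xstat_name names[AXGBE_XSTATS_COUNT];
 *	struct rte_eth_xstat xstats[AXGBE_XSTATS_COUNT];
 *	int cnt = rte_eth_xstats_get_names(port_id, names, AXGBE_XSTATS_COUNT);
 *	rte_eth_xstats_get(port_id, xstats, cnt);
 */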

static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
		stats->ierrors += rxq->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->rx_mbuf_alloc_failed = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}

static int
axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;
	reg_val = AXGMAC_IOREAD(pdata, reg);
	fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
	fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
	fc.autoneg = pdata->pause_autoneg;

	if (pdata->rx_pause && pdata->tx_pause)
		fc.mode = RTE_FC_FULL;
	else if (pdata->rx_pause)
		fc.mode = RTE_FC_RX_PAUSE;
	else if (pdata->tx_pause)
		fc.mode = RTE_FC_TX_PAUSE;
	else
		fc.mode = RTE_FC_NONE;

	fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
	fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
	fc_conf->pause_time = fc.pause_time[0];
	fc_conf->send_xon = fc.send_xon;
	fc_conf->mode = fc.mode;

	return 0;
}
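
/*
 * Note on the water-mark arithmetic above and below: the RFA/RFD threshold
 * fields appear to be encoded in 512-byte units (hence the << 9), while the
 * rte_eth_fc_conf water marks are handled at KB granularity (the / 1024 and
 * the 1024 * multiplications); AXGMAC_FLOW_CONTROL_VALUE() is assumed to
 * perform the inverse byte-count-to-field encoding.
 */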

static int
axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;
	reg = MAC_Q0TFCR;

	pdata->pause_autoneg = fc_conf->autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = fc_conf->send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
	AXGMAC_IOWRITE(pdata, reg, reg_val);
	fc.mode = fc_conf->mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static int
axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
			     struct rte_eth_pfc_conf *pfc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	uint8_t tc_num;

	tc_num = pdata->pfc_map[pfc_conf->priority];

	if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
		PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n",
			     pdata->hw_feat.tc_cnt);
		return -EINVAL;
	}

	pdata->pause_autoneg = pfc_conf->fc.autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = pfc_conf->fc.send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));

	switch (tc_num) {
	case 0:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC0, pfc_conf->fc.pause_time);
		break;
	case 1:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC1, pfc_conf->fc.pause_time);
		break;
	case 2:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC2, pfc_conf->fc.pause_time);
		break;
	case 3:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC3, pfc_conf->fc.pause_time);
		break;
	case 4:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC4, pfc_conf->fc.pause_time);
		break;
	case 5:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC5, pfc_conf->fc.pause_time);
		break;
	case 6:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC6, pfc_conf->fc.pause_time);
		break;
	case 7:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC7, pfc_conf->fc.pause_time);
		break;
	}

	fc.mode = pfc_conf->fc.mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);
	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

void
axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct axgbe_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];
	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_desc;
	qinfo->conf.rx_free_thresh = rxq->free_thresh;
}

void
axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct axgbe_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];
	qinfo->nb_desc = txq->nb_desc;
	qinfo->conf.tx_free_thresh = txq->free_thresh;
}

const uint32_t *
axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == axgbe_recv_pkts)
		return ptypes;
	return NULL;
}
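
/*
 * Capability discovery: the MAC_HWF0R/1R/2R hardware-feature registers are
 * decoded into struct axgbe_hw_features; several encoded fields (hash table
 * size, DMA address width, FIFO sizes, queue/channel/TC counts) are then
 * translated into real values at the end of the function below.
 */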
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}
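
/*
 * Example of the FIFO size translation performed at the end of
 * axgbe_get_all_hw_features() above: an encoded RXFIFOSIZE of 7 expands to
 * 1 << (7 + 7) = 16384 bytes.
 */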
static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}

/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data*/
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}
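
/*
 * The PMD registers below as "net_axgbe". Before EAL can map the device BARs
 * the NIC must be bound to one of the kernel modules listed in
 * RTE_PMD_REGISTER_KMOD_DEP, e.g. (PCI address is illustrative):
 *
 *	dpdk-devbind.py --bind=vfio-pci 0000:03:00.1
 */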

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}