1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. 3 * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 4 */ 5 6 #include "axgbe_rxtx.h" 7 #include "axgbe_ethdev.h" 8 #include "axgbe_common.h" 9 #include "axgbe_phy.h" 10 #include "axgbe_regs.h" 11 #include "rte_time.h" 12 13 static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev); 14 static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev); 15 static int axgbe_dev_configure(struct rte_eth_dev *dev); 16 static int axgbe_dev_start(struct rte_eth_dev *dev); 17 static void axgbe_dev_stop(struct rte_eth_dev *dev); 18 static void axgbe_dev_interrupt_handler(void *param); 19 static void axgbe_dev_close(struct rte_eth_dev *dev); 20 static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); 21 static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); 22 static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); 23 static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); 24 static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, 25 struct rte_ether_addr *mac_addr); 26 static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, 27 struct rte_ether_addr *mac_addr, 28 uint32_t index, 29 uint32_t vmdq); 30 static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); 31 static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 32 struct rte_ether_addr *mc_addr_set, 33 uint32_t nb_mc_addr); 34 static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev, 35 struct rte_ether_addr *mac_addr, 36 uint8_t add); 37 static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, 38 uint8_t add); 39 static int axgbe_dev_link_update(struct rte_eth_dev *dev, 40 int wait_to_complete); 41 static int axgbe_dev_get_regs(struct rte_eth_dev *dev, 42 struct rte_dev_reg_info *regs); 43 static int axgbe_dev_stats_get(struct rte_eth_dev *dev, 44 struct rte_eth_stats *stats); 45 static int axgbe_dev_stats_reset(struct rte_eth_dev *dev); 46 static int axgbe_dev_xstats_get(struct rte_eth_dev *dev, 47 struct rte_eth_xstat *stats, 48 unsigned int n); 49 static int 50 axgbe_dev_xstats_get_names(struct rte_eth_dev *dev, 51 struct rte_eth_xstat_name *xstats_names, 52 unsigned int size); 53 static int 54 axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, 55 const uint64_t *ids, 56 uint64_t *values, 57 unsigned int n); 58 static int 59 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, 60 struct rte_eth_xstat_name *xstats_names, 61 const uint64_t *ids, 62 unsigned int size); 63 static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev); 64 static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 65 struct rte_eth_rss_reta_entry64 *reta_conf, 66 uint16_t reta_size); 67 static int axgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 68 struct rte_eth_rss_reta_entry64 *reta_conf, 69 uint16_t reta_size); 70 static int axgbe_dev_rss_hash_update(struct rte_eth_dev *dev, 71 struct rte_eth_rss_conf *rss_conf); 72 static int axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 73 struct rte_eth_rss_conf *rss_conf); 74 static int axgbe_dev_info_get(struct rte_eth_dev *dev, 75 struct rte_eth_dev_info *dev_info); 76 static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev, 77 struct rte_eth_fc_conf *fc_conf); 78 static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev, 79 struct rte_eth_fc_conf *fc_conf); 80 static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 81 struct rte_eth_pfc_conf *pfc_conf); 82 static void axgbe_rxq_info_get(struct 
rte_eth_dev *dev, uint16_t queue_id, 83 struct rte_eth_rxq_info *qinfo); 84 static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 85 struct rte_eth_txq_info *qinfo); 86 const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); 87 static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 88 89 static int 90 axgbe_timesync_enable(struct rte_eth_dev *dev); 91 static int 92 axgbe_timesync_disable(struct rte_eth_dev *dev); 93 static int 94 axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 95 struct timespec *timestamp, uint32_t flags); 96 static int 97 axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 98 struct timespec *timestamp); 99 static int 100 axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); 101 static int 102 axgbe_timesync_read_time(struct rte_eth_dev *dev, 103 struct timespec *timestamp); 104 static int 105 axgbe_timesync_write_time(struct rte_eth_dev *dev, 106 const struct timespec *timestamp); 107 static void 108 axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec, 109 unsigned int nsec); 110 static void 111 axgbe_update_tstamp_addend(struct axgbe_port *pdata, 112 unsigned int addend); 113 114 struct axgbe_xstats { 115 char name[RTE_ETH_XSTATS_NAME_SIZE]; 116 int offset; 117 }; 118 119 #define AXGMAC_MMC_STAT(_string, _var) \ 120 { _string, \ 121 offsetof(struct axgbe_mmc_stats, _var), \ 122 } 123 124 static const struct axgbe_xstats axgbe_xstats_strings[] = { 125 AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb), 126 AXGMAC_MMC_STAT("tx_packets", txframecount_gb), 127 AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb), 128 AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb), 129 AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb), 130 AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g), 131 AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb), 132 AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb), 133 AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb), 134 AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb), 135 AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb), 136 AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb), 137 AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror), 138 AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes), 139 140 AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb), 141 AXGMAC_MMC_STAT("rx_packets", rxframecount_gb), 142 AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g), 143 AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g), 144 AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g), 145 AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb), 146 AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb), 147 AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb), 148 AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb), 149 AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb), 150 AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb), 151 AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb), 152 AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g), 153 AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g), 154 AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror), 155 AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror), 156 AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror), 157 AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror), 158 AXGMAC_MMC_STAT("rx_out_of_range_errors", 
rxoutofrangetype), 159 AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow), 160 AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror), 161 AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes), 162 }; 163 164 #define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings) 165 166 /* The set of PCI devices this driver supports */ 167 #define AMD_PCI_VENDOR_ID 0x1022 168 #define AMD_PCI_RV_ROOT_COMPLEX_ID 0x15d0 169 #define AMD_PCI_AXGBE_DEVICE_V2A 0x1458 170 #define AMD_PCI_AXGBE_DEVICE_V2B 0x1459 171 172 static const struct rte_pci_id pci_id_axgbe_map[] = { 173 {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)}, 174 {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)}, 175 { .vendor_id = 0, }, 176 }; 177 178 static struct axgbe_version_data axgbe_v2a = { 179 .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2, 180 .xpcs_access = AXGBE_XPCS_ACCESS_V2, 181 .mmc_64bit = 1, 182 .tx_max_fifo_size = 229376, 183 .rx_max_fifo_size = 229376, 184 .tx_tstamp_workaround = 1, 185 .ecc_support = 1, 186 .i2c_support = 1, 187 .an_cdr_workaround = 1, 188 }; 189 190 static struct axgbe_version_data axgbe_v2b = { 191 .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2, 192 .xpcs_access = AXGBE_XPCS_ACCESS_V2, 193 .mmc_64bit = 1, 194 .tx_max_fifo_size = 65536, 195 .rx_max_fifo_size = 65536, 196 .tx_tstamp_workaround = 1, 197 .ecc_support = 1, 198 .i2c_support = 1, 199 .an_cdr_workaround = 1, 200 }; 201 202 static const struct rte_eth_desc_lim rx_desc_lim = { 203 .nb_max = AXGBE_MAX_RING_DESC, 204 .nb_min = AXGBE_MIN_RING_DESC, 205 .nb_align = 8, 206 }; 207 208 static const struct rte_eth_desc_lim tx_desc_lim = { 209 .nb_max = AXGBE_MAX_RING_DESC, 210 .nb_min = AXGBE_MIN_RING_DESC, 211 .nb_align = 8, 212 }; 213 214 static const struct eth_dev_ops axgbe_eth_dev_ops = { 215 .dev_configure = axgbe_dev_configure, 216 .dev_start = axgbe_dev_start, 217 .dev_stop = axgbe_dev_stop, 218 .dev_close = axgbe_dev_close, 219 .promiscuous_enable = axgbe_dev_promiscuous_enable, 220 .promiscuous_disable = axgbe_dev_promiscuous_disable, 221 .allmulticast_enable = axgbe_dev_allmulticast_enable, 222 .allmulticast_disable = axgbe_dev_allmulticast_disable, 223 .mac_addr_set = axgbe_dev_mac_addr_set, 224 .mac_addr_add = axgbe_dev_mac_addr_add, 225 .mac_addr_remove = axgbe_dev_mac_addr_remove, 226 .set_mc_addr_list = axgbe_dev_set_mc_addr_list, 227 .uc_hash_table_set = axgbe_dev_uc_hash_table_set, 228 .uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set, 229 .link_update = axgbe_dev_link_update, 230 .get_reg = axgbe_dev_get_regs, 231 .stats_get = axgbe_dev_stats_get, 232 .stats_reset = axgbe_dev_stats_reset, 233 .xstats_get = axgbe_dev_xstats_get, 234 .xstats_reset = axgbe_dev_xstats_reset, 235 .xstats_get_names = axgbe_dev_xstats_get_names, 236 .xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id, 237 .xstats_get_by_id = axgbe_dev_xstats_get_by_id, 238 .reta_update = axgbe_dev_rss_reta_update, 239 .reta_query = axgbe_dev_rss_reta_query, 240 .rss_hash_update = axgbe_dev_rss_hash_update, 241 .rss_hash_conf_get = axgbe_dev_rss_hash_conf_get, 242 .dev_infos_get = axgbe_dev_info_get, 243 .rx_queue_setup = axgbe_dev_rx_queue_setup, 244 .rx_queue_release = axgbe_dev_rx_queue_release, 245 .tx_queue_setup = axgbe_dev_tx_queue_setup, 246 .tx_queue_release = axgbe_dev_tx_queue_release, 247 .flow_ctrl_get = axgbe_flow_ctrl_get, 248 .flow_ctrl_set = axgbe_flow_ctrl_set, 249 .priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set, 250 .rxq_info_get = axgbe_rxq_info_get, 251 .txq_info_get = 
					axgbe_txq_info_get,
	.dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get,
	.mtu_set                = axgb_mtu_set,
	.timesync_enable        = axgbe_timesync_enable,
	.timesync_disable       = axgbe_timesync_disable,
	.timesync_read_rx_timestamp = axgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = axgbe_timesync_read_tx_timestamp,
	.timesync_adjust_time   = axgbe_timesync_adjust_time,
	.timesync_read_time     = axgbe_timesync_read_time,
	.timesync_write_time    = axgbe_timesync_write_time,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by NIC for handling a
 * specific interrupt.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (rte_bit_relaxed_get32(AXGBE_STOPPED, &pdata->dev_state))
		return;

	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues.
*/ 413 static void 414 axgbe_dev_close(struct rte_eth_dev *dev) 415 { 416 axgbe_dev_clear_queues(dev); 417 } 418 419 static int 420 axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 421 { 422 struct axgbe_port *pdata = dev->data->dev_private; 423 424 PMD_INIT_FUNC_TRACE(); 425 426 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1); 427 428 return 0; 429 } 430 431 static int 432 axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 433 { 434 struct axgbe_port *pdata = dev->data->dev_private; 435 436 PMD_INIT_FUNC_TRACE(); 437 438 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0); 439 440 return 0; 441 } 442 443 static int 444 axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 445 { 446 struct axgbe_port *pdata = dev->data->dev_private; 447 448 PMD_INIT_FUNC_TRACE(); 449 450 if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) 451 return 0; 452 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1); 453 454 return 0; 455 } 456 457 static int 458 axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 459 { 460 struct axgbe_port *pdata = dev->data->dev_private; 461 462 PMD_INIT_FUNC_TRACE(); 463 464 if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) 465 return 0; 466 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0); 467 468 return 0; 469 } 470 471 static int 472 axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) 473 { 474 struct axgbe_port *pdata = dev->data->dev_private; 475 476 /* Set Default MAC Addr */ 477 axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0); 478 479 return 0; 480 } 481 482 static int 483 axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 484 uint32_t index, uint32_t pool __rte_unused) 485 { 486 struct axgbe_port *pdata = dev->data->dev_private; 487 struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 488 489 if (index > hw_feat->addn_mac) { 490 PMD_DRV_LOG(ERR, "Invalid Index %d\n", index); 491 return -EINVAL; 492 } 493 axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index); 494 return 0; 495 } 496 497 static int 498 axgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 499 struct rte_eth_rss_reta_entry64 *reta_conf, 500 uint16_t reta_size) 501 { 502 struct axgbe_port *pdata = dev->data->dev_private; 503 unsigned int i, idx, shift; 504 int ret; 505 506 if (!pdata->rss_enable) { 507 PMD_DRV_LOG(ERR, "RSS not enabled\n"); 508 return -ENOTSUP; 509 } 510 511 if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) { 512 PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size); 513 return -EINVAL; 514 } 515 516 for (i = 0; i < reta_size; i++) { 517 idx = i / RTE_RETA_GROUP_SIZE; 518 shift = i % RTE_RETA_GROUP_SIZE; 519 if ((reta_conf[idx].mask & (1ULL << shift)) == 0) 520 continue; 521 pdata->rss_table[i] = reta_conf[idx].reta[shift]; 522 } 523 524 /* Program the lookup table */ 525 ret = axgbe_write_rss_lookup_table(pdata); 526 return ret; 527 } 528 529 static int 530 axgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 531 struct rte_eth_rss_reta_entry64 *reta_conf, 532 uint16_t reta_size) 533 { 534 struct axgbe_port *pdata = dev->data->dev_private; 535 unsigned int i, idx, shift; 536 537 if (!pdata->rss_enable) { 538 PMD_DRV_LOG(ERR, "RSS not enabled\n"); 539 return -ENOTSUP; 540 } 541 542 if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) { 543 PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size); 544 return -EINVAL; 545 } 546 547 for (i = 0; i < reta_size; i++) { 548 idx = i / RTE_RETA_GROUP_SIZE; 549 shift = i % RTE_RETA_GROUP_SIZE; 550 if ((reta_conf[idx].mask & (1ULL << shift)) == 0) 551 continue; 552 reta_conf[idx].reta[shift] = 
pdata->rss_table[i]; 553 } 554 return 0; 555 } 556 557 static int 558 axgbe_dev_rss_hash_update(struct rte_eth_dev *dev, 559 struct rte_eth_rss_conf *rss_conf) 560 { 561 struct axgbe_port *pdata = dev->data->dev_private; 562 int ret; 563 564 if (!pdata->rss_enable) { 565 PMD_DRV_LOG(ERR, "RSS not enabled\n"); 566 return -ENOTSUP; 567 } 568 569 if (rss_conf == NULL) { 570 PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n"); 571 return -EINVAL; 572 } 573 574 if (rss_conf->rss_key != NULL && 575 rss_conf->rss_key_len == AXGBE_RSS_HASH_KEY_SIZE) { 576 rte_memcpy(pdata->rss_key, rss_conf->rss_key, 577 AXGBE_RSS_HASH_KEY_SIZE); 578 /* Program the hash key */ 579 ret = axgbe_write_rss_hash_key(pdata); 580 if (ret != 0) 581 return ret; 582 } 583 584 pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD; 585 586 if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6)) 587 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); 588 if (pdata->rss_hf & 589 (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP)) 590 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); 591 if (pdata->rss_hf & 592 (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP)) 593 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); 594 595 /* Set the RSS options */ 596 AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); 597 598 return 0; 599 } 600 601 static int 602 axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 603 struct rte_eth_rss_conf *rss_conf) 604 { 605 struct axgbe_port *pdata = dev->data->dev_private; 606 607 if (!pdata->rss_enable) { 608 PMD_DRV_LOG(ERR, "RSS not enabled\n"); 609 return -ENOTSUP; 610 } 611 612 if (rss_conf == NULL) { 613 PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n"); 614 return -EINVAL; 615 } 616 617 if (rss_conf->rss_key != NULL && 618 rss_conf->rss_key_len >= AXGBE_RSS_HASH_KEY_SIZE) { 619 rte_memcpy(rss_conf->rss_key, pdata->rss_key, 620 AXGBE_RSS_HASH_KEY_SIZE); 621 } 622 rss_conf->rss_key_len = AXGBE_RSS_HASH_KEY_SIZE; 623 rss_conf->rss_hf = pdata->rss_hf; 624 return 0; 625 } 626 627 static void 628 axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 629 { 630 struct axgbe_port *pdata = dev->data->dev_private; 631 struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 632 633 if (index > hw_feat->addn_mac) { 634 PMD_DRV_LOG(ERR, "Invalid Index %d\n", index); 635 return; 636 } 637 axgbe_set_mac_addn_addr(pdata, NULL, index); 638 } 639 640 static int 641 axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 642 struct rte_ether_addr *mc_addr_set, 643 uint32_t nb_mc_addr) 644 { 645 struct axgbe_port *pdata = dev->data->dev_private; 646 struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 647 uint32_t index = 1; /* 0 is always default mac */ 648 uint32_t i; 649 650 if (nb_mc_addr > hw_feat->addn_mac) { 651 PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr); 652 return -EINVAL; 653 } 654 655 /* clear unicast addresses */ 656 for (i = 1; i < hw_feat->addn_mac; i++) { 657 if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i])) 658 continue; 659 memset(&dev->data->mac_addrs[i], 0, 660 sizeof(struct rte_ether_addr)); 661 } 662 663 while (nb_mc_addr--) 664 axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++); 665 666 return 0; 667 } 668 669 static int 670 axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev, 671 struct rte_ether_addr *mac_addr, uint8_t add) 672 { 673 struct axgbe_port *pdata = dev->data->dev_private; 674 struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 675 676 if (!hw_feat->hash_table_size) { 677 PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n"); 678 
return -ENOTSUP; 679 } 680 681 axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add); 682 683 if (pdata->uc_hash_mac_addr > 0) { 684 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); 685 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); 686 } else { 687 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0); 688 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0); 689 } 690 return 0; 691 } 692 693 static int 694 axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add) 695 { 696 struct axgbe_port *pdata = dev->data->dev_private; 697 struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 698 uint32_t index; 699 700 if (!hw_feat->hash_table_size) { 701 PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n"); 702 return -ENOTSUP; 703 } 704 705 for (index = 0; index < pdata->hash_table_count; index++) { 706 if (add) 707 pdata->uc_hash_table[index] = ~0; 708 else 709 pdata->uc_hash_table[index] = 0; 710 711 PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n", 712 add ? "set" : "clear", index); 713 714 AXGMAC_IOWRITE(pdata, MAC_HTR(index), 715 pdata->uc_hash_table[index]); 716 } 717 718 if (add) { 719 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); 720 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); 721 } else { 722 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0); 723 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0); 724 } 725 return 0; 726 } 727 728 /* return 0 means link status changed, -1 means not changed */ 729 static int 730 axgbe_dev_link_update(struct rte_eth_dev *dev, 731 int wait_to_complete __rte_unused) 732 { 733 struct axgbe_port *pdata = dev->data->dev_private; 734 struct rte_eth_link link; 735 int ret = 0; 736 737 PMD_INIT_FUNC_TRACE(); 738 rte_delay_ms(800); 739 740 pdata->phy_if.phy_status(pdata); 741 742 memset(&link, 0, sizeof(struct rte_eth_link)); 743 link.link_duplex = pdata->phy.duplex; 744 link.link_status = pdata->phy_link; 745 link.link_speed = pdata->phy_speed; 746 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 747 ETH_LINK_SPEED_FIXED); 748 ret = rte_eth_linkstatus_set(dev, &link); 749 if (ret == -1) 750 PMD_DRV_LOG(ERR, "No change in link status\n"); 751 752 return ret; 753 } 754 755 static int 756 axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs) 757 { 758 struct axgbe_port *pdata = dev->data->dev_private; 759 760 if (regs->data == NULL) { 761 regs->length = axgbe_regs_get_count(pdata); 762 regs->width = sizeof(uint32_t); 763 return 0; 764 } 765 766 /* Only full register dump is supported */ 767 if (regs->length && 768 regs->length != (uint32_t)axgbe_regs_get_count(pdata)) 769 return -ENOTSUP; 770 771 regs->version = pdata->pci_dev->id.vendor_id << 16 | 772 pdata->pci_dev->id.device_id; 773 axgbe_regs_dump(pdata, regs->data); 774 return 0; 775 } 776 static void axgbe_read_mmc_stats(struct axgbe_port *pdata) 777 { 778 struct axgbe_mmc_stats *stats = &pdata->mmc_stats; 779 780 /* Freeze counters */ 781 AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); 782 783 /* Tx counters */ 784 stats->txoctetcount_gb += 785 AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); 786 stats->txoctetcount_gb += 787 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32); 788 789 stats->txframecount_gb += 790 AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); 791 stats->txframecount_gb += 792 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32); 793 794 stats->txbroadcastframes_g += 795 AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); 796 stats->txbroadcastframes_g += 797 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32); 798 799 stats->txmulticastframes_g += 800 
AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); 801 stats->txmulticastframes_g += 802 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32); 803 804 stats->tx64octets_gb += 805 AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); 806 stats->tx64octets_gb += 807 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32); 808 809 stats->tx65to127octets_gb += 810 AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); 811 stats->tx65to127octets_gb += 812 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32); 813 814 stats->tx128to255octets_gb += 815 AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); 816 stats->tx128to255octets_gb += 817 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32); 818 819 stats->tx256to511octets_gb += 820 AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); 821 stats->tx256to511octets_gb += 822 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32); 823 824 stats->tx512to1023octets_gb += 825 AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); 826 stats->tx512to1023octets_gb += 827 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32); 828 829 stats->tx1024tomaxoctets_gb += 830 AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 831 stats->tx1024tomaxoctets_gb += 832 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32); 833 834 stats->txunicastframes_gb += 835 AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); 836 stats->txunicastframes_gb += 837 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32); 838 839 stats->txmulticastframes_gb += 840 AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 841 stats->txmulticastframes_gb += 842 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32); 843 844 stats->txbroadcastframes_g += 845 AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 846 stats->txbroadcastframes_g += 847 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32); 848 849 stats->txunderflowerror += 850 AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); 851 stats->txunderflowerror += 852 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32); 853 854 stats->txoctetcount_g += 855 AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); 856 stats->txoctetcount_g += 857 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32); 858 859 stats->txframecount_g += 860 AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); 861 stats->txframecount_g += 862 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32); 863 864 stats->txpauseframes += 865 AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); 866 stats->txpauseframes += 867 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32); 868 869 stats->txvlanframes_g += 870 AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); 871 stats->txvlanframes_g += 872 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32); 873 874 /* Rx counters */ 875 stats->rxframecount_gb += 876 AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); 877 stats->rxframecount_gb += 878 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32); 879 880 stats->rxoctetcount_gb += 881 AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); 882 stats->rxoctetcount_gb += 883 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32); 884 885 stats->rxoctetcount_g += 886 AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); 887 stats->rxoctetcount_g += 888 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32); 889 890 stats->rxbroadcastframes_g += 891 AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); 892 stats->rxbroadcastframes_g += 893 ((uint64_t)AXGMAC_IOREAD(pdata, 
MMC_RXBROADCASTFRAMES_G_HI) << 32); 894 895 stats->rxmulticastframes_g += 896 AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); 897 stats->rxmulticastframes_g += 898 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32); 899 900 stats->rxcrcerror += 901 AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); 902 stats->rxcrcerror += 903 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32); 904 905 stats->rxrunterror += 906 AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR); 907 908 stats->rxjabbererror += 909 AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR); 910 911 stats->rxundersize_g += 912 AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); 913 914 stats->rxoversize_g += 915 AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); 916 917 stats->rx64octets_gb += 918 AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); 919 stats->rx64octets_gb += 920 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32); 921 922 stats->rx65to127octets_gb += 923 AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); 924 stats->rx65to127octets_gb += 925 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32); 926 927 stats->rx128to255octets_gb += 928 AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); 929 stats->rx128to255octets_gb += 930 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32); 931 932 stats->rx256to511octets_gb += 933 AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); 934 stats->rx256to511octets_gb += 935 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32); 936 937 stats->rx512to1023octets_gb += 938 AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); 939 stats->rx512to1023octets_gb += 940 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32); 941 942 stats->rx1024tomaxoctets_gb += 943 AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 944 stats->rx1024tomaxoctets_gb += 945 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32); 946 947 stats->rxunicastframes_g += 948 AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); 949 stats->rxunicastframes_g += 950 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32); 951 952 stats->rxlengtherror += 953 AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); 954 stats->rxlengtherror += 955 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32); 956 957 stats->rxoutofrangetype += 958 AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); 959 stats->rxoutofrangetype += 960 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32); 961 962 stats->rxpauseframes += 963 AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); 964 stats->rxpauseframes += 965 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32); 966 967 stats->rxfifooverflow += 968 AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); 969 stats->rxfifooverflow += 970 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32); 971 972 stats->rxvlanframes_gb += 973 AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); 974 stats->rxvlanframes_gb += 975 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32); 976 977 stats->rxwatchdogerror += 978 AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); 979 980 /* Un-freeze counters */ 981 AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); 982 } 983 984 static int 985 axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, 986 unsigned int n) 987 { 988 struct axgbe_port *pdata = dev->data->dev_private; 989 unsigned int i; 990 991 if (!stats) 992 return 0; 993 994 axgbe_read_mmc_stats(pdata); 995 996 for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) { 997 stats[i].id = i; 998 stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats + 999 
axgbe_xstats_strings[i].offset); 1000 } 1001 1002 return i; 1003 } 1004 1005 static int 1006 axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1007 struct rte_eth_xstat_name *xstats_names, 1008 unsigned int n) 1009 { 1010 unsigned int i; 1011 1012 if (n >= AXGBE_XSTATS_COUNT && xstats_names) { 1013 for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) { 1014 snprintf(xstats_names[i].name, 1015 RTE_ETH_XSTATS_NAME_SIZE, "%s", 1016 axgbe_xstats_strings[i].name); 1017 } 1018 } 1019 1020 return AXGBE_XSTATS_COUNT; 1021 } 1022 1023 static int 1024 axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 1025 uint64_t *values, unsigned int n) 1026 { 1027 unsigned int i; 1028 uint64_t values_copy[AXGBE_XSTATS_COUNT]; 1029 1030 if (!ids) { 1031 struct axgbe_port *pdata = dev->data->dev_private; 1032 1033 if (n < AXGBE_XSTATS_COUNT) 1034 return AXGBE_XSTATS_COUNT; 1035 1036 axgbe_read_mmc_stats(pdata); 1037 1038 for (i = 0; i < AXGBE_XSTATS_COUNT; i++) { 1039 values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats + 1040 axgbe_xstats_strings[i].offset); 1041 } 1042 1043 return i; 1044 } 1045 1046 axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT); 1047 1048 for (i = 0; i < n; i++) { 1049 if (ids[i] >= AXGBE_XSTATS_COUNT) { 1050 PMD_DRV_LOG(ERR, "id value isn't valid\n"); 1051 return -1; 1052 } 1053 values[i] = values_copy[ids[i]]; 1054 } 1055 return n; 1056 } 1057 1058 static int 1059 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, 1060 struct rte_eth_xstat_name *xstats_names, 1061 const uint64_t *ids, 1062 unsigned int size) 1063 { 1064 struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT]; 1065 unsigned int i; 1066 1067 if (!ids) 1068 return axgbe_dev_xstats_get_names(dev, xstats_names, size); 1069 1070 axgbe_dev_xstats_get_names(dev, xstats_names_copy, size); 1071 1072 for (i = 0; i < size; i++) { 1073 if (ids[i] >= AXGBE_XSTATS_COUNT) { 1074 PMD_DRV_LOG(ERR, "id value isn't valid\n"); 1075 return -1; 1076 } 1077 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 1078 } 1079 return size; 1080 } 1081 1082 static int 1083 axgbe_dev_xstats_reset(struct rte_eth_dev *dev) 1084 { 1085 struct axgbe_port *pdata = dev->data->dev_private; 1086 struct axgbe_mmc_stats *stats = &pdata->mmc_stats; 1087 1088 /* MMC registers are configured for reset on read */ 1089 axgbe_read_mmc_stats(pdata); 1090 1091 /* Reset stats */ 1092 memset(stats, 0, sizeof(*stats)); 1093 1094 return 0; 1095 } 1096 1097 static int 1098 axgbe_dev_stats_get(struct rte_eth_dev *dev, 1099 struct rte_eth_stats *stats) 1100 { 1101 struct axgbe_rx_queue *rxq; 1102 struct axgbe_tx_queue *txq; 1103 struct axgbe_port *pdata = dev->data->dev_private; 1104 struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats; 1105 unsigned int i; 1106 1107 axgbe_read_mmc_stats(pdata); 1108 1109 stats->imissed = mmc_stats->rxfifooverflow; 1110 1111 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1112 rxq = dev->data->rx_queues[i]; 1113 stats->q_ipackets[i] = rxq->pkts; 1114 stats->ipackets += rxq->pkts; 1115 stats->q_ibytes[i] = rxq->bytes; 1116 stats->ibytes += rxq->bytes; 1117 stats->rx_nombuf += rxq->rx_mbuf_alloc_failed; 1118 stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed; 1119 stats->ierrors += rxq->errors; 1120 } 1121 1122 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1123 txq = dev->data->tx_queues[i]; 1124 stats->q_opackets[i] = txq->pkts; 1125 stats->opackets += txq->pkts; 1126 stats->q_obytes[i] = txq->bytes; 1127 stats->obytes += txq->bytes; 1128 stats->oerrors += 
txq->errors; 1129 } 1130 1131 return 0; 1132 } 1133 1134 static int 1135 axgbe_dev_stats_reset(struct rte_eth_dev *dev) 1136 { 1137 struct axgbe_rx_queue *rxq; 1138 struct axgbe_tx_queue *txq; 1139 unsigned int i; 1140 1141 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1142 rxq = dev->data->rx_queues[i]; 1143 rxq->pkts = 0; 1144 rxq->bytes = 0; 1145 rxq->errors = 0; 1146 rxq->rx_mbuf_alloc_failed = 0; 1147 } 1148 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1149 txq = dev->data->tx_queues[i]; 1150 txq->pkts = 0; 1151 txq->bytes = 0; 1152 txq->errors = 0; 1153 } 1154 1155 return 0; 1156 } 1157 1158 static int 1159 axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 1160 { 1161 struct axgbe_port *pdata = dev->data->dev_private; 1162 1163 dev_info->max_rx_queues = pdata->rx_ring_count; 1164 dev_info->max_tx_queues = pdata->tx_ring_count; 1165 dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE; 1166 dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE; 1167 dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1; 1168 dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size; 1169 dev_info->speed_capa = ETH_LINK_SPEED_10G; 1170 1171 dev_info->rx_offload_capa = 1172 DEV_RX_OFFLOAD_IPV4_CKSUM | 1173 DEV_RX_OFFLOAD_UDP_CKSUM | 1174 DEV_RX_OFFLOAD_TCP_CKSUM | 1175 DEV_RX_OFFLOAD_JUMBO_FRAME | 1176 DEV_RX_OFFLOAD_SCATTER | 1177 DEV_RX_OFFLOAD_KEEP_CRC; 1178 1179 dev_info->tx_offload_capa = 1180 DEV_TX_OFFLOAD_IPV4_CKSUM | 1181 DEV_TX_OFFLOAD_UDP_CKSUM | 1182 DEV_TX_OFFLOAD_TCP_CKSUM; 1183 1184 if (pdata->hw_feat.rss) { 1185 dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD; 1186 dev_info->reta_size = pdata->hw_feat.hash_table_size; 1187 dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE; 1188 } 1189 1190 dev_info->rx_desc_lim = rx_desc_lim; 1191 dev_info->tx_desc_lim = tx_desc_lim; 1192 1193 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1194 .rx_free_thresh = AXGBE_RX_FREE_THRESH, 1195 }; 1196 1197 dev_info->default_txconf = (struct rte_eth_txconf) { 1198 .tx_free_thresh = AXGBE_TX_FREE_THRESH, 1199 }; 1200 1201 return 0; 1202 } 1203 1204 static int 1205 axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1206 { 1207 struct axgbe_port *pdata = dev->data->dev_private; 1208 struct xgbe_fc_info fc = pdata->fc; 1209 unsigned int reg, reg_val = 0; 1210 1211 reg = MAC_Q0TFCR; 1212 reg_val = AXGMAC_IOREAD(pdata, reg); 1213 fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA); 1214 fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD); 1215 fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT); 1216 fc.autoneg = pdata->pause_autoneg; 1217 1218 if (pdata->rx_pause && pdata->tx_pause) 1219 fc.mode = RTE_FC_FULL; 1220 else if (pdata->rx_pause) 1221 fc.mode = RTE_FC_RX_PAUSE; 1222 else if (pdata->tx_pause) 1223 fc.mode = RTE_FC_TX_PAUSE; 1224 else 1225 fc.mode = RTE_FC_NONE; 1226 1227 fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024; 1228 fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024; 1229 fc_conf->pause_time = fc.pause_time[0]; 1230 fc_conf->send_xon = fc.send_xon; 1231 fc_conf->mode = fc.mode; 1232 1233 return 0; 1234 } 1235 1236 static int 1237 axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1238 { 1239 struct axgbe_port *pdata = dev->data->dev_private; 1240 struct xgbe_fc_info fc = pdata->fc; 1241 unsigned int reg, reg_val = 0; 1242 reg = MAC_Q0TFCR; 1243 1244 pdata->pause_autoneg = fc_conf->autoneg; 1245 pdata->phy.pause_autoneg = 
pdata->pause_autoneg; 1246 fc.send_xon = fc_conf->send_xon; 1247 AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA, 1248 AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water)); 1249 AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD, 1250 AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water)); 1251 AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time); 1252 AXGMAC_IOWRITE(pdata, reg, reg_val); 1253 fc.mode = fc_conf->mode; 1254 1255 if (fc.mode == RTE_FC_FULL) { 1256 pdata->tx_pause = 1; 1257 pdata->rx_pause = 1; 1258 } else if (fc.mode == RTE_FC_RX_PAUSE) { 1259 pdata->tx_pause = 0; 1260 pdata->rx_pause = 1; 1261 } else if (fc.mode == RTE_FC_TX_PAUSE) { 1262 pdata->tx_pause = 1; 1263 pdata->rx_pause = 0; 1264 } else { 1265 pdata->tx_pause = 0; 1266 pdata->rx_pause = 0; 1267 } 1268 1269 if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) 1270 pdata->hw_if.config_tx_flow_control(pdata); 1271 1272 if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) 1273 pdata->hw_if.config_rx_flow_control(pdata); 1274 1275 pdata->hw_if.config_flow_control(pdata); 1276 pdata->phy.tx_pause = pdata->tx_pause; 1277 pdata->phy.rx_pause = pdata->rx_pause; 1278 1279 return 0; 1280 } 1281 1282 static int 1283 axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 1284 struct rte_eth_pfc_conf *pfc_conf) 1285 { 1286 struct axgbe_port *pdata = dev->data->dev_private; 1287 struct xgbe_fc_info fc = pdata->fc; 1288 uint8_t tc_num; 1289 1290 tc_num = pdata->pfc_map[pfc_conf->priority]; 1291 1292 if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) { 1293 PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n", 1294 pdata->hw_feat.tc_cnt); 1295 return -EINVAL; 1296 } 1297 1298 pdata->pause_autoneg = pfc_conf->fc.autoneg; 1299 pdata->phy.pause_autoneg = pdata->pause_autoneg; 1300 fc.send_xon = pfc_conf->fc.send_xon; 1301 AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA, 1302 AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water)); 1303 AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD, 1304 AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water)); 1305 1306 switch (tc_num) { 1307 case 0: 1308 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, 1309 PSTC0, pfc_conf->fc.pause_time); 1310 break; 1311 case 1: 1312 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, 1313 PSTC1, pfc_conf->fc.pause_time); 1314 break; 1315 case 2: 1316 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, 1317 PSTC2, pfc_conf->fc.pause_time); 1318 break; 1319 case 3: 1320 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, 1321 PSTC3, pfc_conf->fc.pause_time); 1322 break; 1323 case 4: 1324 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, 1325 PSTC4, pfc_conf->fc.pause_time); 1326 break; 1327 case 5: 1328 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, 1329 PSTC5, pfc_conf->fc.pause_time); 1330 break; 1331 case 7: 1332 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, 1333 PSTC6, pfc_conf->fc.pause_time); 1334 break; 1335 case 6: 1336 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, 1337 PSTC7, pfc_conf->fc.pause_time); 1338 break; 1339 } 1340 1341 fc.mode = pfc_conf->fc.mode; 1342 1343 if (fc.mode == RTE_FC_FULL) { 1344 pdata->tx_pause = 1; 1345 pdata->rx_pause = 1; 1346 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1); 1347 } else if (fc.mode == RTE_FC_RX_PAUSE) { 1348 pdata->tx_pause = 0; 1349 pdata->rx_pause = 1; 1350 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1); 1351 } else if (fc.mode == RTE_FC_TX_PAUSE) { 1352 pdata->tx_pause = 1; 1353 pdata->rx_pause = 0; 1354 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); 1355 } else { 1356 pdata->tx_pause = 0; 1357 pdata->rx_pause = 0; 1358 AXGMAC_IOWRITE_BITS(pdata, 
				    MAC_RFCR, PFCE, 0);
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);
	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

void
axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct axgbe_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];
	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_desc;
	qinfo->conf.rx_free_thresh = rxq->free_thresh;
}

void
axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct axgbe_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];
	qinfo->nb_desc = txq->nb_desc;
	qinfo->conf.tx_free_thresh = txq->free_thresh;
}

const uint32_t *
axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == axgbe_recv_pkts)
		return ptypes;
	return NULL;
}

static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	struct axgbe_port *pdata = dev->data->dev_private;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	unsigned int val = 0;

	axgbe_dev_info_get(dev, &dev_info);
	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;
	/* mtu setting is forbidden if port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
			    dev->data->port_id);
		return -EBUSY;
	}
	if (frame_size > RTE_ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		val = 1;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		val = 0;
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	return 0;
}

static void
axgbe_update_tstamp_time(struct axgbe_port *pdata,
		unsigned int sec, unsigned int nsec, int addsub)
{
	unsigned int count = 100;
	uint32_t sub_val = 0;
	uint32_t sub_val_sec = 0xFFFFFFFF;
	uint32_t sub_val_nsec = 0x3B9ACA00;

	if (addsub) {
		if (sec)
			sub_val = sub_val_sec - (sec - 1);
		else
			sub_val = sec;

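		/*
		 * Descriptive note (added): for a subtractive update (addsub != 0)
		 * the complement values are programmed instead of sec/nsec:
		 * 2^32 - sec seconds (written as 0xFFFFFFFF - (sec - 1)) and
		 * 10^9 - nsec nanoseconds (0x3B9ACA00 == 1000000000), together
		 * with the ADDSUB bit. This assumes the usual Synopsys XGMAC
		 * system-time update convention, where the MAC always adds the
		 * programmed value to the current system time.
		 */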
AXGMAC_IOWRITE(pdata, MAC_STSUR, sub_val); 1478 sub_val = sub_val_nsec - nsec; 1479 AXGMAC_IOWRITE(pdata, MAC_STNUR, sub_val); 1480 AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 1); 1481 } else { 1482 AXGMAC_IOWRITE(pdata, MAC_STSUR, sec); 1483 AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 0); 1484 AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec); 1485 } 1486 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1); 1487 /* Wait for time update to complete */ 1488 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT)) 1489 rte_delay_ms(1); 1490 } 1491 1492 static inline uint64_t 1493 div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder) 1494 { 1495 *remainder = dividend % divisor; 1496 return dividend / divisor; 1497 } 1498 1499 static inline uint64_t 1500 div_u64(uint64_t dividend, uint32_t divisor) 1501 { 1502 uint32_t remainder; 1503 return div_u64_rem(dividend, divisor, &remainder); 1504 } 1505 1506 static int 1507 axgbe_adjfreq(struct axgbe_port *pdata, int64_t delta) 1508 { 1509 uint64_t adjust; 1510 uint32_t addend, diff; 1511 unsigned int neg_adjust = 0; 1512 1513 if (delta < 0) { 1514 neg_adjust = 1; 1515 delta = -delta; 1516 } 1517 adjust = (uint64_t)pdata->tstamp_addend; 1518 adjust *= delta; 1519 diff = (uint32_t)div_u64(adjust, 1000000000UL); 1520 addend = (neg_adjust) ? pdata->tstamp_addend - diff : 1521 pdata->tstamp_addend + diff; 1522 pdata->tstamp_addend = addend; 1523 axgbe_update_tstamp_addend(pdata, addend); 1524 return 0; 1525 } 1526 1527 static int 1528 axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 1529 { 1530 struct axgbe_port *pdata = dev->data->dev_private; 1531 struct timespec timestamp_delta; 1532 1533 axgbe_adjfreq(pdata, delta); 1534 pdata->systime_tc.nsec += delta; 1535 1536 if (delta < 0) { 1537 delta = -delta; 1538 timestamp_delta = rte_ns_to_timespec(delta); 1539 axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec, 1540 timestamp_delta.tv_nsec, 1); 1541 } else { 1542 timestamp_delta = rte_ns_to_timespec(delta); 1543 axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec, 1544 timestamp_delta.tv_nsec, 0); 1545 } 1546 return 0; 1547 } 1548 1549 static int 1550 axgbe_timesync_read_time(struct rte_eth_dev *dev, 1551 struct timespec *timestamp) 1552 { 1553 uint64_t nsec; 1554 struct axgbe_port *pdata = dev->data->dev_private; 1555 1556 nsec = AXGMAC_IOREAD(pdata, MAC_STSR); 1557 nsec *= NSEC_PER_SEC; 1558 nsec += AXGMAC_IOREAD(pdata, MAC_STNR); 1559 *timestamp = rte_ns_to_timespec(nsec); 1560 return 0; 1561 } 1562 static int 1563 axgbe_timesync_write_time(struct rte_eth_dev *dev, 1564 const struct timespec *timestamp) 1565 { 1566 unsigned int count = 100; 1567 struct axgbe_port *pdata = dev->data->dev_private; 1568 1569 AXGMAC_IOWRITE(pdata, MAC_STSUR, timestamp->tv_sec); 1570 AXGMAC_IOWRITE(pdata, MAC_STNUR, timestamp->tv_nsec); 1571 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1); 1572 /* Wait for time update to complete */ 1573 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT)) 1574 rte_delay_ms(1); 1575 if (!count) 1576 PMD_DRV_LOG(ERR, "Timed out update timestamp\n"); 1577 return 0; 1578 } 1579 1580 static void 1581 axgbe_update_tstamp_addend(struct axgbe_port *pdata, 1582 uint32_t addend) 1583 { 1584 unsigned int count = 100; 1585 1586 AXGMAC_IOWRITE(pdata, MAC_TSAR, addend); 1587 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1); 1588 1589 /* Wait for addend update to complete */ 1590 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG)) 1591 rte_delay_ms(1); 1592 if (!count) 1593 PMD_DRV_LOG(ERR, 
"Timed out updating timestamp addend register\n"); 1594 } 1595 1596 static void 1597 axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec, 1598 unsigned int nsec) 1599 { 1600 unsigned int count = 100; 1601 1602 /*System Time Sec Update*/ 1603 AXGMAC_IOWRITE(pdata, MAC_STSUR, sec); 1604 /*System Time nanoSec Update*/ 1605 AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec); 1606 /*Initialize Timestamp*/ 1607 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1); 1608 1609 /* Wait for time update to complete */ 1610 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT)) 1611 rte_delay_ms(1); 1612 if (!count) 1613 PMD_DRV_LOG(ERR, "Timed out initializing timestamp\n"); 1614 } 1615 1616 static int 1617 axgbe_timesync_enable(struct rte_eth_dev *dev) 1618 { 1619 struct axgbe_port *pdata = dev->data->dev_private; 1620 unsigned int mac_tscr = 0; 1621 uint64_t dividend; 1622 struct timespec timestamp; 1623 uint64_t nsec; 1624 1625 /* Set one nano-second accuracy */ 1626 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1); 1627 1628 /* Set fine timestamp update */ 1629 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1); 1630 1631 /* Overwrite earlier timestamps */ 1632 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1); 1633 1634 AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr); 1635 1636 /* Enabling processing of ptp over eth pkt */ 1637 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); 1638 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); 1639 /* Enable timestamp for all pkts*/ 1640 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1); 1641 1642 /* enabling timestamp */ 1643 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1644 AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr); 1645 1646 /* Exit if timestamping is not enabled */ 1647 if (!AXGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) { 1648 PMD_DRV_LOG(ERR, "Exiting as timestamp is not enabled\n"); 1649 return 0; 1650 } 1651 1652 /* Sub-second Increment Value*/ 1653 AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, AXGBE_TSTAMP_SSINC); 1654 /* Sub-nanosecond Increment Value */ 1655 AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, AXGBE_TSTAMP_SNSINC); 1656 1657 pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ; 1658 dividend = 50000000; 1659 dividend <<= 32; 1660 pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate); 1661 1662 axgbe_update_tstamp_addend(pdata, pdata->tstamp_addend); 1663 axgbe_set_tstamp_time(pdata, 0, 0); 1664 1665 /* Initialize the timecounter */ 1666 memset(&pdata->systime_tc, 0, sizeof(struct rte_timecounter)); 1667 1668 pdata->systime_tc.cc_mask = AXGBE_CYCLECOUNTER_MASK; 1669 pdata->systime_tc.cc_shift = 0; 1670 pdata->systime_tc.nsec_mask = 0; 1671 1672 PMD_DRV_LOG(DEBUG, "Initializing system time counter with realtime\n"); 1673 1674 /* Updating the counter once with clock real time */ 1675 clock_gettime(CLOCK_REALTIME, ×tamp); 1676 nsec = rte_timespec_to_ns(×tamp); 1677 nsec = rte_timecounter_update(&pdata->systime_tc, nsec); 1678 axgbe_set_tstamp_time(pdata, timestamp.tv_sec, timestamp.tv_nsec); 1679 return 0; 1680 } 1681 1682 static int 1683 axgbe_timesync_disable(struct rte_eth_dev *dev) 1684 { 1685 struct axgbe_port *pdata = dev->data->dev_private; 1686 unsigned int mac_tscr = 0; 1687 1688 /*disable timestamp for all pkts*/ 1689 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 0); 1690 /*disable the addened register*/ 1691 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 0); 1692 /* disable timestamp update */ 1693 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 0); 1694 /*disable time stamp*/ 1695 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 0); 1696 return 0; 
1697 } 1698 1699 static int 1700 axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 1701 struct timespec *timestamp, uint32_t flags) 1702 { 1703 uint64_t nsec = 0; 1704 volatile union axgbe_rx_desc *desc; 1705 uint16_t idx, pmt; 1706 struct axgbe_rx_queue *rxq = *dev->data->rx_queues; 1707 1708 idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur); 1709 desc = &rxq->desc[idx]; 1710 1711 while (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN)) 1712 rte_delay_ms(1); 1713 if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CTXT)) { 1714 if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_CONTEXT_DESC3, TSA) && 1715 !AXGMAC_GET_BITS_LE(desc->write.desc3, 1716 RX_CONTEXT_DESC3, TSD)) { 1717 pmt = AXGMAC_GET_BITS_LE(desc->write.desc3, 1718 RX_CONTEXT_DESC3, PMT); 1719 nsec = rte_le_to_cpu_32(desc->write.desc1); 1720 nsec *= NSEC_PER_SEC; 1721 nsec += rte_le_to_cpu_32(desc->write.desc0); 1722 if (nsec != 0xffffffffffffffffULL) { 1723 if (pmt == 0x01) 1724 *timestamp = rte_ns_to_timespec(nsec); 1725 PMD_DRV_LOG(DEBUG, 1726 "flags = 0x%x nsec = %"PRIu64"\n", 1727 flags, nsec); 1728 } 1729 } 1730 } 1731 1732 return 0; 1733 } 1734 1735 static int 1736 axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 1737 struct timespec *timestamp) 1738 { 1739 uint64_t nsec; 1740 struct axgbe_port *pdata = dev->data->dev_private; 1741 unsigned int tx_snr, tx_ssr; 1742 1743 rte_delay_us(5); 1744 if (pdata->vdata->tx_tstamp_workaround) { 1745 tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR); 1746 tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR); 1747 1748 } else { 1749 tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR); 1750 tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR); 1751 } 1752 if (AXGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) { 1753 PMD_DRV_LOG(DEBUG, "Waiting for TXTSSTSMIS\n"); 1754 return 0; 1755 } 1756 nsec = tx_ssr; 1757 nsec *= NSEC_PER_SEC; 1758 nsec += tx_snr; 1759 PMD_DRV_LOG(DEBUG, "nsec = %"PRIu64" tx_ssr = %d tx_snr = %d\n", 1760 nsec, tx_ssr, tx_snr); 1761 *timestamp = rte_ns_to_timespec(nsec); 1762 return 0; 1763 } 1764 1765 static void axgbe_get_all_hw_features(struct axgbe_port *pdata) 1766 { 1767 unsigned int mac_hfr0, mac_hfr1, mac_hfr2; 1768 struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 1769 1770 mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R); 1771 mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R); 1772 mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R); 1773 1774 memset(hw_feat, 0, sizeof(*hw_feat)); 1775 1776 hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR); 1777 1778 /* Hardware feature register 0 */ 1779 hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); 1780 hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); 1781 hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL); 1782 hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL); 1783 hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL); 1784 hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL); 1785 hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL); 1786 hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL); 1787 hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL); 1788 hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL); 1789 hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL); 1790 hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, 1791 ADDMACADRSEL); 1792 hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); 1793 hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); 1794 1795 /* Hardware feature register 1 */ 1796 
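	/*
	 * Note (added): several HWF1R fields are encoded rather than literal.
	 * RXFIFOSIZE/TXFIFOSIZE and HASHTBLSZ read back as codes and are
	 * translated into real sizes further below
	 * (fifo bytes = 1 << (code + 7); hash table codes 1/2/3 -> 64/128/256).
	 */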

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}
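
/*
 * Worked example of the translations above (register values are
 * illustrative): a raw RXFIFOSIZE field of 9 becomes 1 << (9 + 7) = 64 KB
 * of Rx FIFO, a HASHTBLSZ field of 3 becomes a 256-entry hash table, and
 * an ADDR64 field of 1 selects a 40-bit DMA address width.
 */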

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *  the number of Tx queues to the number of Tx channels
	 *  enabled
	 * -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *  number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}
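
/*
 * Worked example for axgbe_set_counts(), using illustrative values: with
 * hw_feat.tx_ch_cnt = 8, tx_max_channel_count = 8 and tx_max_q_count = 4,
 * tx_ring_count = min(8, 8) = 8 and then min(8, 4) = 4, so four Tx rings
 * and four Tx queues are used. The Rx side keeps ring and queue counts
 * separate because Rx channels do not map 1-to-1 to Rx queues.
 */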

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}

/*
 * Returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = axgbe_dev_tx_descriptor_status;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
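
	/*
	 * Byte layout used when decoding XP_MAC_ADDR_LO/HI above, shown with
	 * illustrative register values: mac_lo = 0x33221100 and
	 * mac_hi = 0x5544 yield the station address 00:11:22:33:44:55
	 * (addr_bytes[0] comes from the least significant byte of mac_lo).
	 */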

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);

	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}
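
/*
 * Note: rte_eth_dev_pci_generic_probe() allocates the rte_eth_dev together
 * with a private data area of sizeof(struct axgbe_port) and then calls
 * eth_axgbe_dev_init() on it; rte_eth_dev_pci_generic_remove() is the
 * symmetric teardown path and invokes eth_axgbe_dev_uninit().
 */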

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER(axgbe_logtype_init, pmd.net.axgbe.init, NOTICE);
RTE_LOG_REGISTER(axgbe_logtype_driver, pmd.net.axgbe.driver, NOTICE);
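
/*
 * The two RTE_LOG_REGISTER() lines above expose the PMD log types under the
 * names "pmd.net.axgbe.init" and "pmd.net.axgbe.driver", so debug output can
 * be enabled at run time with EAL arguments such as
 * --log-level=pmd.net.axgbe.init:debug --log-level=pmd.net.axgbe.driver:debug
 * without rebuilding the driver.
 */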