1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. 3 * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 4 */ 5 6 #include "axgbe_rxtx.h" 7 #include "axgbe_ethdev.h" 8 #include "axgbe_common.h" 9 #include "axgbe_phy.h" 10 #include "axgbe_regs.h" 11 #include "rte_time.h" 12 13 #include "eal_filesystem.h" 14 15 #include <rte_vect.h> 16 17 #ifdef RTE_ARCH_X86 18 #include <cpuid.h> 19 #else 20 #define __cpuid(n, a, b, c, d) 21 #endif 22 23 static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev); 24 static int axgbe_dev_configure(struct rte_eth_dev *dev); 25 static int axgbe_dev_start(struct rte_eth_dev *dev); 26 static int axgbe_dev_stop(struct rte_eth_dev *dev); 27 static void axgbe_dev_interrupt_handler(void *param); 28 static int axgbe_dev_close(struct rte_eth_dev *dev); 29 static int axgbe_dev_reset(struct rte_eth_dev *dev); 30 static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); 31 static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); 32 static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); 33 static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); 34 static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, 35 struct rte_ether_addr *mac_addr); 36 static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, 37 struct rte_ether_addr *mac_addr, 38 uint32_t index, 39 uint32_t vmdq); 40 static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index); 41 static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 42 struct rte_ether_addr *mc_addr_set, 43 uint32_t nb_mc_addr); 44 static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev, 45 struct rte_ether_addr *mac_addr, 46 uint8_t add); 47 static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, 48 uint8_t add); 49 static int axgbe_dev_link_update(struct rte_eth_dev *dev, 50 int wait_to_complete); 51 static int axgbe_dev_get_regs(struct rte_eth_dev *dev, 52 struct rte_dev_reg_info *regs); 53 static int axgbe_dev_stats_get(struct rte_eth_dev *dev, 54 struct rte_eth_stats *stats); 55 static int axgbe_dev_stats_reset(struct rte_eth_dev *dev); 56 static int axgbe_dev_xstats_get(struct rte_eth_dev *dev, 57 struct rte_eth_xstat *stats, 58 unsigned int n); 59 static int 60 axgbe_dev_xstats_get_names(struct rte_eth_dev *dev, 61 struct rte_eth_xstat_name *xstats_names, 62 unsigned int size); 63 static int 64 axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, 65 const uint64_t *ids, 66 uint64_t *values, 67 unsigned int n); 68 static int 69 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, 70 const uint64_t *ids, 71 struct rte_eth_xstat_name *xstats_names, 72 unsigned int size); 73 static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev); 74 static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 75 struct rte_eth_rss_reta_entry64 *reta_conf, 76 uint16_t reta_size); 77 static int axgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 78 struct rte_eth_rss_reta_entry64 *reta_conf, 79 uint16_t reta_size); 80 static int axgbe_dev_rss_hash_update(struct rte_eth_dev *dev, 81 struct rte_eth_rss_conf *rss_conf); 82 static int axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 83 struct rte_eth_rss_conf *rss_conf); 84 static int axgbe_dev_info_get(struct rte_eth_dev *dev, 85 struct rte_eth_dev_info *dev_info); 86 static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev, 87 struct rte_eth_fc_conf *fc_conf); 88 static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev, 89 struct rte_eth_fc_conf 
*fc_conf); 90 static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 91 struct rte_eth_pfc_conf *pfc_conf); 92 static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 93 struct rte_eth_rxq_info *qinfo); 94 static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 95 struct rte_eth_txq_info *qinfo); 96 const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); 97 static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 98 99 static int 100 axgbe_timesync_enable(struct rte_eth_dev *dev); 101 static int 102 axgbe_timesync_disable(struct rte_eth_dev *dev); 103 static int 104 axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 105 struct timespec *timestamp, uint32_t flags); 106 static int 107 axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 108 struct timespec *timestamp); 109 static int 110 axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); 111 static int 112 axgbe_timesync_read_time(struct rte_eth_dev *dev, 113 struct timespec *timestamp); 114 static int 115 axgbe_timesync_write_time(struct rte_eth_dev *dev, 116 const struct timespec *timestamp); 117 static void 118 axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec, 119 unsigned int nsec); 120 static void 121 axgbe_update_tstamp_addend(struct axgbe_port *pdata, 122 unsigned int addend); 123 static int 124 axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on); 125 static int axgbe_vlan_tpid_set(struct rte_eth_dev *dev, 126 enum rte_vlan_type vlan_type, uint16_t tpid); 127 static int axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); 128 129 struct axgbe_xstats { 130 char name[RTE_ETH_XSTATS_NAME_SIZE]; 131 int offset; 132 }; 133 134 #define AXGMAC_MMC_STAT(_string, _var) \ 135 { _string, \ 136 offsetof(struct axgbe_mmc_stats, _var), \ 137 } 138 139 static const struct axgbe_xstats axgbe_xstats_strings[] = { 140 AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb), 141 AXGMAC_MMC_STAT("tx_packets", txframecount_gb), 142 AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb), 143 AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb), 144 AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb), 145 AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g), 146 AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb), 147 AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb), 148 AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb), 149 AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb), 150 AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb), 151 AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb), 152 AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror), 153 AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes), 154 155 AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb), 156 AXGMAC_MMC_STAT("rx_packets", rxframecount_gb), 157 AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g), 158 AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g), 159 AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g), 160 AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb), 161 AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb), 162 AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb), 163 AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb), 164 AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb), 165 AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb), 166 
AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb), 167 AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g), 168 AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g), 169 AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror), 170 AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror), 171 AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror), 172 AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror), 173 AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype), 174 AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow), 175 AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror), 176 AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes), 177 }; 178 179 #define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings) 180 181 /* The set of PCI devices this driver supports */ 182 #define AMD_PCI_VENDOR_ID 0x1022 183 184 #define Fam17h 0x17 185 #define Fam19h 0x19 186 187 #define CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 188 #define CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 189 #define CPUID_VENDOR_AuthenticAMD_edx 0x69746e65 190 191 #define AMD_PCI_AXGBE_DEVICE_V2A 0x1458 192 #define AMD_PCI_AXGBE_DEVICE_V2B 0x1459 193 194 static const struct rte_pci_id pci_id_axgbe_map[] = { 195 {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)}, 196 {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)}, 197 { .vendor_id = 0, }, 198 }; 199 200 static struct axgbe_version_data axgbe_v2a = { 201 .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2, 202 .xpcs_access = AXGBE_XPCS_ACCESS_V2, 203 .mmc_64bit = 1, 204 .tx_max_fifo_size = 229376, 205 .rx_max_fifo_size = 229376, 206 .tx_tstamp_workaround = 1, 207 .ecc_support = 1, 208 .i2c_support = 1, 209 .an_cdr_workaround = 1, 210 }; 211 212 static struct axgbe_version_data axgbe_v2b = { 213 .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2, 214 .xpcs_access = AXGBE_XPCS_ACCESS_V2, 215 .mmc_64bit = 1, 216 .tx_max_fifo_size = 65536, 217 .rx_max_fifo_size = 65536, 218 .tx_tstamp_workaround = 1, 219 .ecc_support = 1, 220 .i2c_support = 1, 221 .an_cdr_workaround = 1, 222 }; 223 224 static const struct rte_eth_desc_lim rx_desc_lim = { 225 .nb_max = AXGBE_MAX_RING_DESC, 226 .nb_min = AXGBE_MIN_RING_DESC, 227 .nb_align = 8, 228 }; 229 230 static const struct rte_eth_desc_lim tx_desc_lim = { 231 .nb_max = AXGBE_MAX_RING_DESC, 232 .nb_min = AXGBE_MIN_RING_DESC, 233 .nb_align = 8, 234 }; 235 236 static const struct eth_dev_ops axgbe_eth_dev_ops = { 237 .dev_configure = axgbe_dev_configure, 238 .dev_start = axgbe_dev_start, 239 .dev_stop = axgbe_dev_stop, 240 .dev_close = axgbe_dev_close, 241 .dev_reset = axgbe_dev_reset, 242 .promiscuous_enable = axgbe_dev_promiscuous_enable, 243 .promiscuous_disable = axgbe_dev_promiscuous_disable, 244 .allmulticast_enable = axgbe_dev_allmulticast_enable, 245 .allmulticast_disable = axgbe_dev_allmulticast_disable, 246 .mac_addr_set = axgbe_dev_mac_addr_set, 247 .mac_addr_add = axgbe_dev_mac_addr_add, 248 .mac_addr_remove = axgbe_dev_mac_addr_remove, 249 .set_mc_addr_list = axgbe_dev_set_mc_addr_list, 250 .uc_hash_table_set = axgbe_dev_uc_hash_table_set, 251 .uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set, 252 .link_update = axgbe_dev_link_update, 253 .get_reg = axgbe_dev_get_regs, 254 .stats_get = axgbe_dev_stats_get, 255 .stats_reset = axgbe_dev_stats_reset, 256 .xstats_get = axgbe_dev_xstats_get, 257 .xstats_reset = axgbe_dev_xstats_reset, 258 .xstats_get_names = axgbe_dev_xstats_get_names, 259 .xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id, 260 
.xstats_get_by_id = axgbe_dev_xstats_get_by_id, 261 .reta_update = axgbe_dev_rss_reta_update, 262 .reta_query = axgbe_dev_rss_reta_query, 263 .rss_hash_update = axgbe_dev_rss_hash_update, 264 .rss_hash_conf_get = axgbe_dev_rss_hash_conf_get, 265 .dev_infos_get = axgbe_dev_info_get, 266 .rx_queue_setup = axgbe_dev_rx_queue_setup, 267 .rx_queue_release = axgbe_dev_rx_queue_release, 268 .tx_queue_setup = axgbe_dev_tx_queue_setup, 269 .tx_queue_release = axgbe_dev_tx_queue_release, 270 .flow_ctrl_get = axgbe_flow_ctrl_get, 271 .flow_ctrl_set = axgbe_flow_ctrl_set, 272 .priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set, 273 .rxq_info_get = axgbe_rxq_info_get, 274 .txq_info_get = axgbe_txq_info_get, 275 .dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get, 276 .mtu_set = axgb_mtu_set, 277 .vlan_filter_set = axgbe_vlan_filter_set, 278 .vlan_tpid_set = axgbe_vlan_tpid_set, 279 .vlan_offload_set = axgbe_vlan_offload_set, 280 .timesync_enable = axgbe_timesync_enable, 281 .timesync_disable = axgbe_timesync_disable, 282 .timesync_read_rx_timestamp = axgbe_timesync_read_rx_timestamp, 283 .timesync_read_tx_timestamp = axgbe_timesync_read_tx_timestamp, 284 .timesync_adjust_time = axgbe_timesync_adjust_time, 285 .timesync_read_time = axgbe_timesync_read_time, 286 .timesync_write_time = axgbe_timesync_write_time, 287 .fw_version_get = axgbe_dev_fw_version_get, 288 }; 289 290 static int axgbe_phy_reset(struct axgbe_port *pdata) 291 { 292 pdata->phy_link = -1; 293 pdata->phy_speed = SPEED_UNKNOWN; 294 return pdata->phy_if.phy_reset(pdata); 295 } 296 297 /* 298 * Interrupt handler triggered by NIC for handling 299 * specific interrupt. 300 * 301 * @param handle 302 * Pointer to interrupt handle. 303 * @param param 304 * The address of parameter (struct rte_eth_dev *) registered before. 305 * 306 * @return 307 * void 308 */ 309 static void 310 axgbe_dev_interrupt_handler(void *param) 311 { 312 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 313 struct axgbe_port *pdata = dev->data->dev_private; 314 unsigned int dma_isr, dma_ch_isr; 315 316 pdata->phy_if.an_isr(pdata); 317 /*DMA related interrupts*/ 318 dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR); 319 PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr); 320 if (dma_isr) { 321 if (dma_isr & 1) { 322 dma_ch_isr = 323 AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *) 324 pdata->rx_queues[0], 325 DMA_CH_SR); 326 PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr); 327 AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *) 328 pdata->rx_queues[0], 329 DMA_CH_SR, dma_ch_isr); 330 } 331 } 332 /* Unmask interrupts since disabled after generation */ 333 rte_intr_ack(pdata->pci_dev->intr_handle); 334 } 335 336 /* 337 * Configure device link speed and setup link. 338 * It returns 0 on success. 
339 */ 340 static int 341 axgbe_dev_configure(struct rte_eth_dev *dev) 342 { 343 struct axgbe_port *pdata = dev->data->dev_private; 344 /* Checksum offload to hardware */ 345 pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads & 346 RTE_ETH_RX_OFFLOAD_CHECKSUM; 347 return 0; 348 } 349 350 static int 351 axgbe_dev_rx_mq_config(struct rte_eth_dev *dev) 352 { 353 struct axgbe_port *pdata = dev->data->dev_private; 354 355 if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) 356 pdata->rss_enable = 1; 357 else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE) 358 pdata->rss_enable = 0; 359 else 360 return -1; 361 return 0; 362 } 363 364 static int 365 axgbe_dev_start(struct rte_eth_dev *dev) 366 { 367 struct axgbe_port *pdata = dev->data->dev_private; 368 uint16_t i; 369 int ret; 370 371 dev->dev_ops = &axgbe_eth_dev_ops; 372 373 PMD_INIT_FUNC_TRACE(); 374 375 /* Multiqueue RSS */ 376 ret = axgbe_dev_rx_mq_config(dev); 377 if (ret) { 378 PMD_DRV_LOG(ERR, "Unable to config RX MQ\n"); 379 return ret; 380 } 381 ret = axgbe_phy_reset(pdata); 382 if (ret) { 383 PMD_DRV_LOG(ERR, "phy reset failed\n"); 384 return ret; 385 } 386 ret = pdata->hw_if.init(pdata); 387 if (ret) { 388 PMD_DRV_LOG(ERR, "dev_init failed\n"); 389 return ret; 390 } 391 392 /* enable uio/vfio intr/eventfd mapping */ 393 rte_intr_enable(pdata->pci_dev->intr_handle); 394 395 /* phy start*/ 396 pdata->phy_if.phy_start(pdata); 397 axgbe_dev_enable_tx(dev); 398 axgbe_dev_enable_rx(dev); 399 400 rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state); 401 rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state); 402 403 axgbe_set_rx_function(dev); 404 axgbe_set_tx_function(dev); 405 406 for (i = 0; i < dev->data->nb_rx_queues; i++) 407 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 408 for (i = 0; i < dev->data->nb_tx_queues; i++) 409 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; 410 411 return 0; 412 } 413 414 /* Stop device: disable rx and tx functions to allow for reconfiguring. 
*/ 415 static int 416 axgbe_dev_stop(struct rte_eth_dev *dev) 417 { 418 struct axgbe_port *pdata = dev->data->dev_private; 419 420 PMD_INIT_FUNC_TRACE(); 421 422 rte_intr_disable(pdata->pci_dev->intr_handle); 423 424 if (rte_bit_relaxed_get32(AXGBE_STOPPED, &pdata->dev_state)) 425 return 0; 426 427 rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state); 428 axgbe_dev_disable_tx(dev); 429 axgbe_dev_disable_rx(dev); 430 431 pdata->phy_if.phy_stop(pdata); 432 pdata->hw_if.exit(pdata); 433 memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link)); 434 rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state); 435 436 return 0; 437 } 438 439 static int 440 axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 441 { 442 struct axgbe_port *pdata = dev->data->dev_private; 443 444 PMD_INIT_FUNC_TRACE(); 445 446 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1); 447 448 return 0; 449 } 450 451 static int 452 axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 453 { 454 struct axgbe_port *pdata = dev->data->dev_private; 455 456 PMD_INIT_FUNC_TRACE(); 457 458 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0); 459 460 return 0; 461 } 462 463 static int 464 axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 465 { 466 struct axgbe_port *pdata = dev->data->dev_private; 467 468 PMD_INIT_FUNC_TRACE(); 469 470 if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) 471 return 0; 472 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1); 473 474 return 0; 475 } 476 477 static int 478 axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 479 { 480 struct axgbe_port *pdata = dev->data->dev_private; 481 482 PMD_INIT_FUNC_TRACE(); 483 484 if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) 485 return 0; 486 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0); 487 488 return 0; 489 } 490 491 static int 492 axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr) 493 { 494 struct axgbe_port *pdata = dev->data->dev_private; 495 496 /* Set Default MAC Addr */ 497 axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0); 498 499 return 0; 500 } 501 502 static int 503 axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 504 uint32_t index, uint32_t pool __rte_unused) 505 { 506 struct axgbe_port *pdata = dev->data->dev_private; 507 struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 508 509 if (index > hw_feat->addn_mac) { 510 PMD_DRV_LOG(ERR, "Invalid Index %d\n", index); 511 return -EINVAL; 512 } 513 axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index); 514 return 0; 515 } 516 517 static int 518 axgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 519 struct rte_eth_rss_reta_entry64 *reta_conf, 520 uint16_t reta_size) 521 { 522 struct axgbe_port *pdata = dev->data->dev_private; 523 unsigned int i, idx, shift; 524 int ret; 525 526 if (!pdata->rss_enable) { 527 PMD_DRV_LOG(ERR, "RSS not enabled\n"); 528 return -ENOTSUP; 529 } 530 531 if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) { 532 PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size); 533 return -EINVAL; 534 } 535 536 for (i = 0; i < reta_size; i++) { 537 idx = i / RTE_ETH_RETA_GROUP_SIZE; 538 shift = i % RTE_ETH_RETA_GROUP_SIZE; 539 if ((reta_conf[idx].mask & (1ULL << shift)) == 0) 540 continue; 541 pdata->rss_table[i] = reta_conf[idx].reta[shift]; 542 } 543 544 /* Program the lookup table */ 545 ret = axgbe_write_rss_lookup_table(pdata); 546 return ret; 547 } 548 549 static int 550 axgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 551 struct rte_eth_rss_reta_entry64 *reta_conf, 552 uint16_t reta_size) 553 { 554 struct axgbe_port *pdata = 
dev->data->dev_private; 555 unsigned int i, idx, shift; 556 557 if (!pdata->rss_enable) { 558 PMD_DRV_LOG(ERR, "RSS not enabled\n"); 559 return -ENOTSUP; 560 } 561 562 if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) { 563 PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size); 564 return -EINVAL; 565 } 566 567 for (i = 0; i < reta_size; i++) { 568 idx = i / RTE_ETH_RETA_GROUP_SIZE; 569 shift = i % RTE_ETH_RETA_GROUP_SIZE; 570 if ((reta_conf[idx].mask & (1ULL << shift)) == 0) 571 continue; 572 reta_conf[idx].reta[shift] = pdata->rss_table[i]; 573 } 574 return 0; 575 } 576 577 static int 578 axgbe_dev_rss_hash_update(struct rte_eth_dev *dev, 579 struct rte_eth_rss_conf *rss_conf) 580 { 581 struct axgbe_port *pdata = dev->data->dev_private; 582 int ret; 583 584 if (!pdata->rss_enable) { 585 PMD_DRV_LOG(ERR, "RSS not enabled\n"); 586 return -ENOTSUP; 587 } 588 589 if (rss_conf == NULL) { 590 PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n"); 591 return -EINVAL; 592 } 593 594 if (rss_conf->rss_key != NULL && 595 rss_conf->rss_key_len == AXGBE_RSS_HASH_KEY_SIZE) { 596 rte_memcpy(pdata->rss_key, rss_conf->rss_key, 597 AXGBE_RSS_HASH_KEY_SIZE); 598 /* Program the hash key */ 599 ret = axgbe_write_rss_hash_key(pdata); 600 if (ret != 0) 601 return ret; 602 } 603 604 pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD; 605 606 if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6)) 607 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); 608 if (pdata->rss_hf & 609 (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP)) 610 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); 611 if (pdata->rss_hf & 612 (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)) 613 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); 614 615 /* Set the RSS options */ 616 AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); 617 618 return 0; 619 } 620 621 static int 622 axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 623 struct rte_eth_rss_conf *rss_conf) 624 { 625 struct axgbe_port *pdata = dev->data->dev_private; 626 627 if (!pdata->rss_enable) { 628 PMD_DRV_LOG(ERR, "RSS not enabled\n"); 629 return -ENOTSUP; 630 } 631 632 if (rss_conf == NULL) { 633 PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n"); 634 return -EINVAL; 635 } 636 637 if (rss_conf->rss_key != NULL && 638 rss_conf->rss_key_len >= AXGBE_RSS_HASH_KEY_SIZE) { 639 rte_memcpy(rss_conf->rss_key, pdata->rss_key, 640 AXGBE_RSS_HASH_KEY_SIZE); 641 } 642 rss_conf->rss_key_len = AXGBE_RSS_HASH_KEY_SIZE; 643 rss_conf->rss_hf = pdata->rss_hf; 644 return 0; 645 } 646 647 static int 648 axgbe_dev_reset(struct rte_eth_dev *dev) 649 { 650 int ret = 0; 651 652 ret = axgbe_dev_close(dev); 653 if (ret) 654 return ret; 655 656 ret = eth_axgbe_dev_init(dev); 657 658 return ret; 659 } 660 661 static void 662 axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 663 { 664 struct axgbe_port *pdata = dev->data->dev_private; 665 struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 666 667 if (index > hw_feat->addn_mac) { 668 PMD_DRV_LOG(ERR, "Invalid Index %d\n", index); 669 return; 670 } 671 axgbe_set_mac_addn_addr(pdata, NULL, index); 672 } 673 674 static int 675 axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 676 struct rte_ether_addr *mc_addr_set, 677 uint32_t nb_mc_addr) 678 { 679 struct axgbe_port *pdata = dev->data->dev_private; 680 struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 681 uint32_t index = 1; /* 0 is always default mac */ 682 uint32_t i; 683 684 if (nb_mc_addr > hw_feat->addn_mac) { 
685 PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr); 686 return -EINVAL; 687 } 688 689 /* clear unicast addresses */ 690 for (i = 1; i < hw_feat->addn_mac; i++) { 691 if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i])) 692 continue; 693 memset(&dev->data->mac_addrs[i], 0, 694 sizeof(struct rte_ether_addr)); 695 } 696 697 while (nb_mc_addr--) 698 axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++); 699 700 return 0; 701 } 702 703 static int 704 axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev, 705 struct rte_ether_addr *mac_addr, uint8_t add) 706 { 707 struct axgbe_port *pdata = dev->data->dev_private; 708 struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 709 710 if (!hw_feat->hash_table_size) { 711 PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n"); 712 return -ENOTSUP; 713 } 714 715 axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add); 716 717 if (pdata->uc_hash_mac_addr > 0) { 718 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); 719 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); 720 } else { 721 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0); 722 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0); 723 } 724 return 0; 725 } 726 727 static int 728 axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add) 729 { 730 struct axgbe_port *pdata = dev->data->dev_private; 731 struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 732 uint32_t index; 733 734 if (!hw_feat->hash_table_size) { 735 PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n"); 736 return -ENOTSUP; 737 } 738 739 for (index = 0; index < pdata->hash_table_count; index++) { 740 if (add) 741 pdata->uc_hash_table[index] = ~0; 742 else 743 pdata->uc_hash_table[index] = 0; 744 745 PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n", 746 add ? "set" : "clear", index); 747 748 AXGMAC_IOWRITE(pdata, MAC_HTR(index), 749 pdata->uc_hash_table[index]); 750 } 751 752 if (add) { 753 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); 754 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); 755 } else { 756 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0); 757 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0); 758 } 759 return 0; 760 } 761 762 /* return 0 means link status changed, -1 means not changed */ 763 static int 764 axgbe_dev_link_update(struct rte_eth_dev *dev, 765 int wait_to_complete __rte_unused) 766 { 767 struct axgbe_port *pdata = dev->data->dev_private; 768 struct rte_eth_link link; 769 int ret = 0; 770 771 PMD_INIT_FUNC_TRACE(); 772 rte_delay_ms(800); 773 774 pdata->phy_if.phy_status(pdata); 775 776 memset(&link, 0, sizeof(struct rte_eth_link)); 777 link.link_duplex = pdata->phy.duplex; 778 link.link_status = pdata->phy_link; 779 link.link_speed = pdata->phy_speed; 780 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 781 RTE_ETH_LINK_SPEED_FIXED); 782 ret = rte_eth_linkstatus_set(dev, &link); 783 if (ret == -1) 784 PMD_DRV_LOG(ERR, "No change in link status\n"); 785 786 return ret; 787 } 788 789 static int 790 axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs) 791 { 792 struct axgbe_port *pdata = dev->data->dev_private; 793 794 if (regs->data == NULL) { 795 regs->length = axgbe_regs_get_count(pdata); 796 regs->width = sizeof(uint32_t); 797 return 0; 798 } 799 800 /* Only full register dump is supported */ 801 if (regs->length && 802 regs->length != (uint32_t)axgbe_regs_get_count(pdata)) 803 return -ENOTSUP; 804 805 regs->version = pdata->pci_dev->id.vendor_id << 16 | 806 pdata->pci_dev->id.device_id; 807 axgbe_regs_dump(pdata, regs->data); 808 return 0; 809 } 810 static void axgbe_read_mmc_stats(struct 
axgbe_port *pdata) 811 { 812 struct axgbe_mmc_stats *stats = &pdata->mmc_stats; 813 814 /* Freeze counters */ 815 AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); 816 817 /* Tx counters */ 818 stats->txoctetcount_gb += 819 AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); 820 stats->txoctetcount_gb += 821 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32); 822 823 stats->txframecount_gb += 824 AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); 825 stats->txframecount_gb += 826 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32); 827 828 stats->txbroadcastframes_g += 829 AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); 830 stats->txbroadcastframes_g += 831 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32); 832 833 stats->txmulticastframes_g += 834 AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); 835 stats->txmulticastframes_g += 836 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32); 837 838 stats->tx64octets_gb += 839 AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); 840 stats->tx64octets_gb += 841 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32); 842 843 stats->tx65to127octets_gb += 844 AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); 845 stats->tx65to127octets_gb += 846 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32); 847 848 stats->tx128to255octets_gb += 849 AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); 850 stats->tx128to255octets_gb += 851 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32); 852 853 stats->tx256to511octets_gb += 854 AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); 855 stats->tx256to511octets_gb += 856 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32); 857 858 stats->tx512to1023octets_gb += 859 AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); 860 stats->tx512to1023octets_gb += 861 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32); 862 863 stats->tx1024tomaxoctets_gb += 864 AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 865 stats->tx1024tomaxoctets_gb += 866 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32); 867 868 stats->txunicastframes_gb += 869 AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); 870 stats->txunicastframes_gb += 871 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32); 872 873 stats->txmulticastframes_gb += 874 AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 875 stats->txmulticastframes_gb += 876 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32); 877 878 stats->txbroadcastframes_g += 879 AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 880 stats->txbroadcastframes_g += 881 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32); 882 883 stats->txunderflowerror += 884 AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); 885 stats->txunderflowerror += 886 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32); 887 888 stats->txoctetcount_g += 889 AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); 890 stats->txoctetcount_g += 891 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32); 892 893 stats->txframecount_g += 894 AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); 895 stats->txframecount_g += 896 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32); 897 898 stats->txpauseframes += 899 AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); 900 stats->txpauseframes += 901 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32); 902 903 stats->txvlanframes_g += 904 AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); 905 stats->txvlanframes_g += 906 
((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32); 907 908 /* Rx counters */ 909 stats->rxframecount_gb += 910 AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); 911 stats->rxframecount_gb += 912 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32); 913 914 stats->rxoctetcount_gb += 915 AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); 916 stats->rxoctetcount_gb += 917 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32); 918 919 stats->rxoctetcount_g += 920 AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); 921 stats->rxoctetcount_g += 922 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32); 923 924 stats->rxbroadcastframes_g += 925 AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); 926 stats->rxbroadcastframes_g += 927 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32); 928 929 stats->rxmulticastframes_g += 930 AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); 931 stats->rxmulticastframes_g += 932 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32); 933 934 stats->rxcrcerror += 935 AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); 936 stats->rxcrcerror += 937 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32); 938 939 stats->rxrunterror += 940 AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR); 941 942 stats->rxjabbererror += 943 AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR); 944 945 stats->rxundersize_g += 946 AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); 947 948 stats->rxoversize_g += 949 AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); 950 951 stats->rx64octets_gb += 952 AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); 953 stats->rx64octets_gb += 954 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32); 955 956 stats->rx65to127octets_gb += 957 AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); 958 stats->rx65to127octets_gb += 959 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32); 960 961 stats->rx128to255octets_gb += 962 AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); 963 stats->rx128to255octets_gb += 964 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32); 965 966 stats->rx256to511octets_gb += 967 AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); 968 stats->rx256to511octets_gb += 969 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32); 970 971 stats->rx512to1023octets_gb += 972 AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); 973 stats->rx512to1023octets_gb += 974 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32); 975 976 stats->rx1024tomaxoctets_gb += 977 AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 978 stats->rx1024tomaxoctets_gb += 979 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32); 980 981 stats->rxunicastframes_g += 982 AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); 983 stats->rxunicastframes_g += 984 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32); 985 986 stats->rxlengtherror += 987 AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); 988 stats->rxlengtherror += 989 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32); 990 991 stats->rxoutofrangetype += 992 AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); 993 stats->rxoutofrangetype += 994 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32); 995 996 stats->rxpauseframes += 997 AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); 998 stats->rxpauseframes += 999 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32); 1000 1001 stats->rxfifooverflow += 1002 AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); 1003 stats->rxfifooverflow += 1004 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) 
<< 32); 1005 1006 stats->rxvlanframes_gb += 1007 AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); 1008 stats->rxvlanframes_gb += 1009 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32); 1010 1011 stats->rxwatchdogerror += 1012 AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); 1013 1014 /* Un-freeze counters */ 1015 AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); 1016 } 1017 1018 static int 1019 axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, 1020 unsigned int n) 1021 { 1022 struct axgbe_port *pdata = dev->data->dev_private; 1023 unsigned int i; 1024 1025 if (n < AXGBE_XSTATS_COUNT) 1026 return AXGBE_XSTATS_COUNT; 1027 1028 axgbe_read_mmc_stats(pdata); 1029 1030 for (i = 0; i < AXGBE_XSTATS_COUNT; i++) { 1031 stats[i].id = i; 1032 stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats + 1033 axgbe_xstats_strings[i].offset); 1034 } 1035 1036 return AXGBE_XSTATS_COUNT; 1037 } 1038 1039 static int 1040 axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1041 struct rte_eth_xstat_name *xstats_names, 1042 unsigned int n) 1043 { 1044 unsigned int i; 1045 1046 if (n >= AXGBE_XSTATS_COUNT && xstats_names) { 1047 for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) { 1048 snprintf(xstats_names[i].name, 1049 RTE_ETH_XSTATS_NAME_SIZE, "%s", 1050 axgbe_xstats_strings[i].name); 1051 } 1052 } 1053 1054 return AXGBE_XSTATS_COUNT; 1055 } 1056 1057 static int 1058 axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 1059 uint64_t *values, unsigned int n) 1060 { 1061 unsigned int i; 1062 uint64_t values_copy[AXGBE_XSTATS_COUNT]; 1063 1064 if (!ids) { 1065 struct axgbe_port *pdata = dev->data->dev_private; 1066 1067 if (n < AXGBE_XSTATS_COUNT) 1068 return AXGBE_XSTATS_COUNT; 1069 1070 axgbe_read_mmc_stats(pdata); 1071 1072 for (i = 0; i < AXGBE_XSTATS_COUNT; i++) { 1073 values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats + 1074 axgbe_xstats_strings[i].offset); 1075 } 1076 1077 return i; 1078 } 1079 1080 axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT); 1081 1082 for (i = 0; i < n; i++) { 1083 if (ids[i] >= AXGBE_XSTATS_COUNT) { 1084 PMD_DRV_LOG(ERR, "id value isn't valid\n"); 1085 return -1; 1086 } 1087 values[i] = values_copy[ids[i]]; 1088 } 1089 return n; 1090 } 1091 1092 static int 1093 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev, 1094 const uint64_t *ids, 1095 struct rte_eth_xstat_name *xstats_names, 1096 unsigned int size) 1097 { 1098 struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT]; 1099 unsigned int i; 1100 1101 if (!ids) 1102 return axgbe_dev_xstats_get_names(dev, xstats_names, size); 1103 1104 axgbe_dev_xstats_get_names(dev, xstats_names_copy, size); 1105 1106 for (i = 0; i < size; i++) { 1107 if (ids[i] >= AXGBE_XSTATS_COUNT) { 1108 PMD_DRV_LOG(ERR, "id value isn't valid\n"); 1109 return -1; 1110 } 1111 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 1112 } 1113 return size; 1114 } 1115 1116 static int 1117 axgbe_dev_xstats_reset(struct rte_eth_dev *dev) 1118 { 1119 struct axgbe_port *pdata = dev->data->dev_private; 1120 struct axgbe_mmc_stats *stats = &pdata->mmc_stats; 1121 1122 /* MMC registers are configured for reset on read */ 1123 axgbe_read_mmc_stats(pdata); 1124 1125 /* Reset stats */ 1126 memset(stats, 0, sizeof(*stats)); 1127 1128 return 0; 1129 } 1130 1131 static int 1132 axgbe_dev_stats_get(struct rte_eth_dev *dev, 1133 struct rte_eth_stats *stats) 1134 { 1135 struct axgbe_rx_queue *rxq; 1136 struct axgbe_tx_queue *txq; 1137 struct axgbe_port *pdata = dev->data->dev_private; 1138 
struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats; 1139 unsigned int i; 1140 1141 axgbe_read_mmc_stats(pdata); 1142 1143 stats->imissed = mmc_stats->rxfifooverflow; 1144 1145 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1146 rxq = dev->data->rx_queues[i]; 1147 if (rxq) { 1148 stats->q_ipackets[i] = rxq->pkts; 1149 stats->ipackets += rxq->pkts; 1150 stats->q_ibytes[i] = rxq->bytes; 1151 stats->ibytes += rxq->bytes; 1152 stats->rx_nombuf += rxq->rx_mbuf_alloc_failed; 1153 stats->q_errors[i] = rxq->errors 1154 + rxq->rx_mbuf_alloc_failed; 1155 stats->ierrors += rxq->errors; 1156 } else { 1157 PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n", 1158 dev->data->port_id); 1159 } 1160 } 1161 1162 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1163 txq = dev->data->tx_queues[i]; 1164 if (txq) { 1165 stats->q_opackets[i] = txq->pkts; 1166 stats->opackets += txq->pkts; 1167 stats->q_obytes[i] = txq->bytes; 1168 stats->obytes += txq->bytes; 1169 stats->oerrors += txq->errors; 1170 } else { 1171 PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n", 1172 dev->data->port_id); 1173 } 1174 } 1175 1176 return 0; 1177 } 1178 1179 static int 1180 axgbe_dev_stats_reset(struct rte_eth_dev *dev) 1181 { 1182 struct axgbe_rx_queue *rxq; 1183 struct axgbe_tx_queue *txq; 1184 unsigned int i; 1185 1186 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1187 rxq = dev->data->rx_queues[i]; 1188 if (rxq) { 1189 rxq->pkts = 0; 1190 rxq->bytes = 0; 1191 rxq->errors = 0; 1192 rxq->rx_mbuf_alloc_failed = 0; 1193 } else { 1194 PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n", 1195 dev->data->port_id); 1196 } 1197 } 1198 for (i = 0; i < dev->data->nb_tx_queues; i++) { 1199 txq = dev->data->tx_queues[i]; 1200 if (txq) { 1201 txq->pkts = 0; 1202 txq->bytes = 0; 1203 txq->errors = 0; 1204 } else { 1205 PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n", 1206 dev->data->port_id); 1207 } 1208 } 1209 1210 return 0; 1211 } 1212 1213 static int 1214 axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 1215 { 1216 struct axgbe_port *pdata = dev->data->dev_private; 1217 1218 dev_info->max_rx_queues = pdata->rx_ring_count; 1219 dev_info->max_tx_queues = pdata->tx_ring_count; 1220 dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE; 1221 dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE; 1222 dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1; 1223 dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size; 1224 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G; 1225 1226 dev_info->rx_offload_capa = 1227 RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 1228 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | 1229 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | 1230 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 1231 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 1232 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 1233 RTE_ETH_RX_OFFLOAD_SCATTER | 1234 RTE_ETH_RX_OFFLOAD_KEEP_CRC; 1235 1236 dev_info->tx_offload_capa = 1237 RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 1238 RTE_ETH_TX_OFFLOAD_QINQ_INSERT | 1239 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | 1240 RTE_ETH_TX_OFFLOAD_MULTI_SEGS | 1241 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | 1242 RTE_ETH_TX_OFFLOAD_TCP_CKSUM; 1243 1244 if (pdata->hw_feat.rss) { 1245 dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD; 1246 dev_info->reta_size = pdata->hw_feat.hash_table_size; 1247 dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE; 1248 } 1249 1250 dev_info->rx_desc_lim = rx_desc_lim; 1251 dev_info->tx_desc_lim = tx_desc_lim; 1252 1253 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1254 .rx_free_thresh = AXGBE_RX_FREE_THRESH, 1255 }; 1256 1257 dev_info->default_txconf = (struct 
rte_eth_txconf) { 1258 .tx_free_thresh = AXGBE_TX_FREE_THRESH, 1259 }; 1260 1261 return 0; 1262 } 1263 1264 static int 1265 axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1266 { 1267 struct axgbe_port *pdata = dev->data->dev_private; 1268 struct xgbe_fc_info fc = pdata->fc; 1269 unsigned int reg, reg_val = 0; 1270 1271 reg = MAC_Q0TFCR; 1272 reg_val = AXGMAC_IOREAD(pdata, reg); 1273 fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA); 1274 fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD); 1275 fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT); 1276 fc.autoneg = pdata->pause_autoneg; 1277 1278 if (pdata->rx_pause && pdata->tx_pause) 1279 fc.mode = RTE_ETH_FC_FULL; 1280 else if (pdata->rx_pause) 1281 fc.mode = RTE_ETH_FC_RX_PAUSE; 1282 else if (pdata->tx_pause) 1283 fc.mode = RTE_ETH_FC_TX_PAUSE; 1284 else 1285 fc.mode = RTE_ETH_FC_NONE; 1286 1287 fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024; 1288 fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024; 1289 fc_conf->pause_time = fc.pause_time[0]; 1290 fc_conf->send_xon = fc.send_xon; 1291 fc_conf->mode = fc.mode; 1292 1293 return 0; 1294 } 1295 1296 static int 1297 axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1298 { 1299 struct axgbe_port *pdata = dev->data->dev_private; 1300 struct xgbe_fc_info fc = pdata->fc; 1301 unsigned int reg, reg_val = 0; 1302 reg = MAC_Q0TFCR; 1303 1304 pdata->pause_autoneg = fc_conf->autoneg; 1305 pdata->phy.pause_autoneg = pdata->pause_autoneg; 1306 fc.send_xon = fc_conf->send_xon; 1307 AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA, 1308 AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water)); 1309 AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD, 1310 AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water)); 1311 AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time); 1312 AXGMAC_IOWRITE(pdata, reg, reg_val); 1313 fc.mode = fc_conf->mode; 1314 1315 if (fc.mode == RTE_ETH_FC_FULL) { 1316 pdata->tx_pause = 1; 1317 pdata->rx_pause = 1; 1318 } else if (fc.mode == RTE_ETH_FC_RX_PAUSE) { 1319 pdata->tx_pause = 0; 1320 pdata->rx_pause = 1; 1321 } else if (fc.mode == RTE_ETH_FC_TX_PAUSE) { 1322 pdata->tx_pause = 1; 1323 pdata->rx_pause = 0; 1324 } else { 1325 pdata->tx_pause = 0; 1326 pdata->rx_pause = 0; 1327 } 1328 1329 if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) 1330 pdata->hw_if.config_tx_flow_control(pdata); 1331 1332 if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) 1333 pdata->hw_if.config_rx_flow_control(pdata); 1334 1335 pdata->hw_if.config_flow_control(pdata); 1336 pdata->phy.tx_pause = pdata->tx_pause; 1337 pdata->phy.rx_pause = pdata->rx_pause; 1338 1339 return 0; 1340 } 1341 1342 static int 1343 axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 1344 struct rte_eth_pfc_conf *pfc_conf) 1345 { 1346 struct axgbe_port *pdata = dev->data->dev_private; 1347 struct xgbe_fc_info fc = pdata->fc; 1348 uint8_t tc_num; 1349 1350 tc_num = pdata->pfc_map[pfc_conf->priority]; 1351 1352 if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) { 1353 PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n", 1354 pdata->hw_feat.tc_cnt); 1355 return -EINVAL; 1356 } 1357 1358 pdata->pause_autoneg = pfc_conf->fc.autoneg; 1359 pdata->phy.pause_autoneg = pdata->pause_autoneg; 1360 fc.send_xon = pfc_conf->fc.send_xon; 1361 AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA, 1362 AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water)); 1363 
AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD, 1364 AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water)); 1365 1366 switch (tc_num) { 1367 case 0: 1368 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, 1369 PSTC0, pfc_conf->fc.pause_time); 1370 break; 1371 case 1: 1372 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, 1373 PSTC1, pfc_conf->fc.pause_time); 1374 break; 1375 case 2: 1376 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, 1377 PSTC2, pfc_conf->fc.pause_time); 1378 break; 1379 case 3: 1380 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R, 1381 PSTC3, pfc_conf->fc.pause_time); 1382 break; 1383 case 4: 1384 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, 1385 PSTC4, pfc_conf->fc.pause_time); 1386 break; 1387 case 5: 1388 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, 1389 PSTC5, pfc_conf->fc.pause_time); 1390 break; 1391 case 7: 1392 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, 1393 PSTC6, pfc_conf->fc.pause_time); 1394 break; 1395 case 6: 1396 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R, 1397 PSTC7, pfc_conf->fc.pause_time); 1398 break; 1399 } 1400 1401 fc.mode = pfc_conf->fc.mode; 1402 1403 if (fc.mode == RTE_ETH_FC_FULL) { 1404 pdata->tx_pause = 1; 1405 pdata->rx_pause = 1; 1406 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1); 1407 } else if (fc.mode == RTE_ETH_FC_RX_PAUSE) { 1408 pdata->tx_pause = 0; 1409 pdata->rx_pause = 1; 1410 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1); 1411 } else if (fc.mode == RTE_ETH_FC_TX_PAUSE) { 1412 pdata->tx_pause = 1; 1413 pdata->rx_pause = 0; 1414 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); 1415 } else { 1416 pdata->tx_pause = 0; 1417 pdata->rx_pause = 0; 1418 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); 1419 } 1420 1421 if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) 1422 pdata->hw_if.config_tx_flow_control(pdata); 1423 1424 if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) 1425 pdata->hw_if.config_rx_flow_control(pdata); 1426 pdata->hw_if.config_flow_control(pdata); 1427 pdata->phy.tx_pause = pdata->tx_pause; 1428 pdata->phy.rx_pause = pdata->rx_pause; 1429 1430 return 0; 1431 } 1432 1433 void 1434 axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 1435 struct rte_eth_rxq_info *qinfo) 1436 { 1437 struct axgbe_rx_queue *rxq; 1438 1439 rxq = dev->data->rx_queues[queue_id]; 1440 qinfo->mp = rxq->mb_pool; 1441 qinfo->scattered_rx = dev->data->scattered_rx; 1442 qinfo->nb_desc = rxq->nb_desc; 1443 qinfo->conf.rx_free_thresh = rxq->free_thresh; 1444 } 1445 1446 void 1447 axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 1448 struct rte_eth_txq_info *qinfo) 1449 { 1450 struct axgbe_tx_queue *txq; 1451 1452 txq = dev->data->tx_queues[queue_id]; 1453 qinfo->nb_desc = txq->nb_desc; 1454 qinfo->conf.tx_free_thresh = txq->free_thresh; 1455 } 1456 const uint32_t * 1457 axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 1458 { 1459 static const uint32_t ptypes[] = { 1460 RTE_PTYPE_L2_ETHER, 1461 RTE_PTYPE_L2_ETHER_TIMESYNC, 1462 RTE_PTYPE_L2_ETHER_LLDP, 1463 RTE_PTYPE_L2_ETHER_ARP, 1464 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 1465 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 1466 RTE_PTYPE_L4_FRAG, 1467 RTE_PTYPE_L4_ICMP, 1468 RTE_PTYPE_L4_NONFRAG, 1469 RTE_PTYPE_L4_SCTP, 1470 RTE_PTYPE_L4_TCP, 1471 RTE_PTYPE_L4_UDP, 1472 RTE_PTYPE_TUNNEL_GRENAT, 1473 RTE_PTYPE_TUNNEL_IP, 1474 RTE_PTYPE_INNER_L2_ETHER, 1475 RTE_PTYPE_INNER_L2_ETHER_VLAN, 1476 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 1477 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 1478 RTE_PTYPE_INNER_L4_FRAG, 1479 RTE_PTYPE_INNER_L4_ICMP, 1480 RTE_PTYPE_INNER_L4_NONFRAG, 1481 RTE_PTYPE_INNER_L4_SCTP, 1482 RTE_PTYPE_INNER_L4_TCP, 1483 
RTE_PTYPE_INNER_L4_UDP, 1484 RTE_PTYPE_UNKNOWN 1485 }; 1486 1487 if (dev->rx_pkt_burst == axgbe_recv_pkts) 1488 return ptypes; 1489 return NULL; 1490 } 1491 1492 static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1493 { 1494 struct axgbe_port *pdata = dev->data->dev_private; 1495 unsigned int val; 1496 1497 /* mtu setting is forbidden if port is start */ 1498 if (dev->data->dev_started) { 1499 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", 1500 dev->data->port_id); 1501 return -EBUSY; 1502 } 1503 val = mtu > RTE_ETHER_MTU ? 1 : 0; 1504 AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); 1505 1506 return 0; 1507 } 1508 1509 static void 1510 axgbe_update_tstamp_time(struct axgbe_port *pdata, 1511 unsigned int sec, unsigned int nsec, int addsub) 1512 { 1513 unsigned int count = 100; 1514 uint32_t sub_val = 0; 1515 uint32_t sub_val_sec = 0xFFFFFFFF; 1516 uint32_t sub_val_nsec = 0x3B9ACA00; 1517 1518 if (addsub) { 1519 if (sec) 1520 sub_val = sub_val_sec - (sec - 1); 1521 else 1522 sub_val = sec; 1523 1524 AXGMAC_IOWRITE(pdata, MAC_STSUR, sub_val); 1525 sub_val = sub_val_nsec - nsec; 1526 AXGMAC_IOWRITE(pdata, MAC_STNUR, sub_val); 1527 AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 1); 1528 } else { 1529 AXGMAC_IOWRITE(pdata, MAC_STSUR, sec); 1530 AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 0); 1531 AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec); 1532 } 1533 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1); 1534 /* Wait for time update to complete */ 1535 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT)) 1536 rte_delay_ms(1); 1537 } 1538 1539 static inline uint64_t 1540 div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder) 1541 { 1542 *remainder = dividend % divisor; 1543 return dividend / divisor; 1544 } 1545 1546 static inline uint64_t 1547 div_u64(uint64_t dividend, uint32_t divisor) 1548 { 1549 uint32_t remainder; 1550 return div_u64_rem(dividend, divisor, &remainder); 1551 } 1552 1553 static int 1554 axgbe_adjfreq(struct axgbe_port *pdata, int64_t delta) 1555 { 1556 uint64_t adjust; 1557 uint32_t addend, diff; 1558 unsigned int neg_adjust = 0; 1559 1560 if (delta < 0) { 1561 neg_adjust = 1; 1562 delta = -delta; 1563 } 1564 adjust = (uint64_t)pdata->tstamp_addend; 1565 adjust *= delta; 1566 diff = (uint32_t)div_u64(adjust, 1000000000UL); 1567 addend = (neg_adjust) ? 
pdata->tstamp_addend - diff : 1568 pdata->tstamp_addend + diff; 1569 pdata->tstamp_addend = addend; 1570 axgbe_update_tstamp_addend(pdata, addend); 1571 return 0; 1572 } 1573 1574 static int 1575 axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 1576 { 1577 struct axgbe_port *pdata = dev->data->dev_private; 1578 struct timespec timestamp_delta; 1579 1580 axgbe_adjfreq(pdata, delta); 1581 pdata->systime_tc.nsec += delta; 1582 1583 if (delta < 0) { 1584 delta = -delta; 1585 timestamp_delta = rte_ns_to_timespec(delta); 1586 axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec, 1587 timestamp_delta.tv_nsec, 1); 1588 } else { 1589 timestamp_delta = rte_ns_to_timespec(delta); 1590 axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec, 1591 timestamp_delta.tv_nsec, 0); 1592 } 1593 return 0; 1594 } 1595 1596 static int 1597 axgbe_timesync_read_time(struct rte_eth_dev *dev, 1598 struct timespec *timestamp) 1599 { 1600 uint64_t nsec; 1601 struct axgbe_port *pdata = dev->data->dev_private; 1602 1603 nsec = AXGMAC_IOREAD(pdata, MAC_STSR); 1604 nsec *= NSEC_PER_SEC; 1605 nsec += AXGMAC_IOREAD(pdata, MAC_STNR); 1606 *timestamp = rte_ns_to_timespec(nsec); 1607 return 0; 1608 } 1609 static int 1610 axgbe_timesync_write_time(struct rte_eth_dev *dev, 1611 const struct timespec *timestamp) 1612 { 1613 unsigned int count = 100; 1614 struct axgbe_port *pdata = dev->data->dev_private; 1615 1616 AXGMAC_IOWRITE(pdata, MAC_STSUR, timestamp->tv_sec); 1617 AXGMAC_IOWRITE(pdata, MAC_STNUR, timestamp->tv_nsec); 1618 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1); 1619 /* Wait for time update to complete */ 1620 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT)) 1621 rte_delay_ms(1); 1622 if (!count) 1623 PMD_DRV_LOG(ERR, "Timed out update timestamp\n"); 1624 return 0; 1625 } 1626 1627 static void 1628 axgbe_update_tstamp_addend(struct axgbe_port *pdata, 1629 uint32_t addend) 1630 { 1631 unsigned int count = 100; 1632 1633 AXGMAC_IOWRITE(pdata, MAC_TSAR, addend); 1634 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1); 1635 1636 /* Wait for addend update to complete */ 1637 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG)) 1638 rte_delay_ms(1); 1639 if (!count) 1640 PMD_DRV_LOG(ERR, "Timed out updating timestamp addend register\n"); 1641 } 1642 1643 static void 1644 axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec, 1645 unsigned int nsec) 1646 { 1647 unsigned int count = 100; 1648 1649 /*System Time Sec Update*/ 1650 AXGMAC_IOWRITE(pdata, MAC_STSUR, sec); 1651 /*System Time nanoSec Update*/ 1652 AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec); 1653 /*Initialize Timestamp*/ 1654 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1); 1655 1656 /* Wait for time update to complete */ 1657 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT)) 1658 rte_delay_ms(1); 1659 if (!count) 1660 PMD_DRV_LOG(ERR, "Timed out initializing timestamp\n"); 1661 } 1662 1663 static int 1664 axgbe_timesync_enable(struct rte_eth_dev *dev) 1665 { 1666 struct axgbe_port *pdata = dev->data->dev_private; 1667 unsigned int mac_tscr = 0; 1668 uint64_t dividend; 1669 struct timespec timestamp; 1670 uint64_t nsec; 1671 1672 /* Set one nano-second accuracy */ 1673 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1); 1674 1675 /* Set fine timestamp update */ 1676 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1); 1677 1678 /* Overwrite earlier timestamps */ 1679 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1); 1680 1681 AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr); 1682 1683 /* Enabling processing of 
ptp over eth pkt */ 1684 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); 1685 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); 1686 /* Enable timestamp for all pkts*/ 1687 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1); 1688 1689 /* enabling timestamp */ 1690 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); 1691 AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr); 1692 1693 /* Exit if timestamping is not enabled */ 1694 if (!AXGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) { 1695 PMD_DRV_LOG(ERR, "Exiting as timestamp is not enabled\n"); 1696 return 0; 1697 } 1698 1699 /* Sub-second Increment Value*/ 1700 AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, AXGBE_TSTAMP_SSINC); 1701 /* Sub-nanosecond Increment Value */ 1702 AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, AXGBE_TSTAMP_SNSINC); 1703 1704 pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ; 1705 dividend = 50000000; 1706 dividend <<= 32; 1707 pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate); 1708 1709 axgbe_update_tstamp_addend(pdata, pdata->tstamp_addend); 1710 axgbe_set_tstamp_time(pdata, 0, 0); 1711 1712 /* Initialize the timecounter */ 1713 memset(&pdata->systime_tc, 0, sizeof(struct rte_timecounter)); 1714 1715 pdata->systime_tc.cc_mask = AXGBE_CYCLECOUNTER_MASK; 1716 pdata->systime_tc.cc_shift = 0; 1717 pdata->systime_tc.nsec_mask = 0; 1718 1719 PMD_DRV_LOG(DEBUG, "Initializing system time counter with realtime\n"); 1720 1721 /* Updating the counter once with clock real time */ 1722 clock_gettime(CLOCK_REALTIME, ×tamp); 1723 nsec = rte_timespec_to_ns(×tamp); 1724 nsec = rte_timecounter_update(&pdata->systime_tc, nsec); 1725 axgbe_set_tstamp_time(pdata, timestamp.tv_sec, timestamp.tv_nsec); 1726 return 0; 1727 } 1728 1729 static int 1730 axgbe_timesync_disable(struct rte_eth_dev *dev) 1731 { 1732 struct axgbe_port *pdata = dev->data->dev_private; 1733 unsigned int mac_tscr = 0; 1734 1735 /*disable timestamp for all pkts*/ 1736 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 0); 1737 /*disable the addened register*/ 1738 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 0); 1739 /* disable timestamp update */ 1740 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 0); 1741 /*disable time stamp*/ 1742 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 0); 1743 return 0; 1744 } 1745 1746 static int 1747 axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 1748 struct timespec *timestamp, uint32_t flags) 1749 { 1750 uint64_t nsec = 0; 1751 volatile union axgbe_rx_desc *desc; 1752 uint16_t idx, pmt; 1753 struct axgbe_rx_queue *rxq = *dev->data->rx_queues; 1754 1755 idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur); 1756 desc = &rxq->desc[idx]; 1757 1758 while (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN)) 1759 rte_delay_ms(1); 1760 if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CTXT)) { 1761 if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_CONTEXT_DESC3, TSA) && 1762 !AXGMAC_GET_BITS_LE(desc->write.desc3, 1763 RX_CONTEXT_DESC3, TSD)) { 1764 pmt = AXGMAC_GET_BITS_LE(desc->write.desc3, 1765 RX_CONTEXT_DESC3, PMT); 1766 nsec = rte_le_to_cpu_32(desc->write.desc1); 1767 nsec *= NSEC_PER_SEC; 1768 nsec += rte_le_to_cpu_32(desc->write.desc0); 1769 if (nsec != 0xffffffffffffffffULL) { 1770 if (pmt == 0x01) 1771 *timestamp = rte_ns_to_timespec(nsec); 1772 PMD_DRV_LOG(DEBUG, 1773 "flags = 0x%x nsec = %"PRIu64"\n", 1774 flags, nsec); 1775 } 1776 } 1777 } 1778 1779 return 0; 1780 } 1781 1782 static int 1783 axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 1784 struct timespec *timestamp) 1785 { 1786 uint64_t nsec; 1787 struct axgbe_port *pdata = 
dev->data->dev_private; 1788 unsigned int tx_snr, tx_ssr; 1789 1790 rte_delay_us(5); 1791 if (pdata->vdata->tx_tstamp_workaround) { 1792 tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR); 1793 tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR); 1794 1795 } else { 1796 tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR); 1797 tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR); 1798 } 1799 if (AXGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) { 1800 PMD_DRV_LOG(DEBUG, "Waiting for TXTSSTSMIS\n"); 1801 return 0; 1802 } 1803 nsec = tx_ssr; 1804 nsec *= NSEC_PER_SEC; 1805 nsec += tx_snr; 1806 PMD_DRV_LOG(DEBUG, "nsec = %"PRIu64" tx_ssr = %d tx_snr = %d\n", 1807 nsec, tx_ssr, tx_snr); 1808 *timestamp = rte_ns_to_timespec(nsec); 1809 return 0; 1810 } 1811 1812 static int 1813 axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on) 1814 { 1815 struct axgbe_port *pdata = dev->data->dev_private; 1816 unsigned long vid_bit, vid_idx; 1817 1818 vid_bit = VLAN_TABLE_BIT(vid); 1819 vid_idx = VLAN_TABLE_IDX(vid); 1820 1821 if (on) { 1822 PMD_DRV_LOG(DEBUG, "Set VLAN vid=%d for device = %s\n", 1823 vid, pdata->eth_dev->device->name); 1824 pdata->active_vlans[vid_idx] |= vid_bit; 1825 } else { 1826 PMD_DRV_LOG(DEBUG, "Reset VLAN vid=%d for device = %s\n", 1827 vid, pdata->eth_dev->device->name); 1828 pdata->active_vlans[vid_idx] &= ~vid_bit; 1829 } 1830 pdata->hw_if.update_vlan_hash_table(pdata); 1831 return 0; 1832 } 1833 1834 static int 1835 axgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1836 enum rte_vlan_type vlan_type, 1837 uint16_t tpid) 1838 { 1839 struct axgbe_port *pdata = dev->data->dev_private; 1840 uint32_t reg = 0; 1841 uint32_t qinq = 0; 1842 1843 qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP); 1844 PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq); 1845 1846 switch (vlan_type) { 1847 case RTE_ETH_VLAN_TYPE_INNER: 1848 PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n"); 1849 if (qinq) { 1850 if (tpid != 0x8100 && tpid != 0x88a8) 1851 PMD_DRV_LOG(ERR, 1852 "tag supported 0x8100/0x88A8\n"); 1853 PMD_DRV_LOG(DEBUG, "qinq with inner tag\n"); 1854 1855 /*Enable Inner VLAN Tag */ 1856 AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 1); 1857 reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT); 1858 PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg); 1859 1860 } else { 1861 PMD_DRV_LOG(ERR, 1862 "Inner type not supported in single tag\n"); 1863 } 1864 break; 1865 case RTE_ETH_VLAN_TYPE_OUTER: 1866 PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n"); 1867 if (qinq) { 1868 PMD_DRV_LOG(DEBUG, "double tagging is enabled\n"); 1869 /*Enable outer VLAN tag*/ 1870 AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 0); 1871 reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT); 1872 PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg); 1873 1874 AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 1); 1875 reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANIR, CSVL); 1876 PMD_DRV_LOG(DEBUG, "bit CSVL = 0x%x\n", reg); 1877 } else { 1878 if (tpid != 0x8100 && tpid != 0x88a8) 1879 PMD_DRV_LOG(ERR, 1880 "tag supported 0x8100/0x88A8\n"); 1881 } 1882 break; 1883 case RTE_ETH_VLAN_TYPE_MAX: 1884 PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n"); 1885 break; 1886 case RTE_ETH_VLAN_TYPE_UNKNOWN: 1887 PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n"); 1888 break; 1889 } 1890 return 0; 1891 } 1892 1893 static void axgbe_vlan_extend_enable(struct axgbe_port *pdata) 1894 { 1895 int qinq = 0; 1896 1897 AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 1); 1898 qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP); 1899 PMD_DRV_LOG(DEBUG, "vlan double tag enabled EDVLP:qinq=0x%x\n", qinq); 1900 } 1901 1902 
static void axgbe_vlan_extend_disable(struct axgbe_port *pdata)
{
	int qinq = 0;

	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 0);
	qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
	PMD_DRV_LOG(DEBUG, "vlan double tag disabled EDVLP:qinq=0x%x\n", qinq);
}

static int
axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
			PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.enable_rx_vlan_stripping(pdata);
		} else {
			PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.disable_rx_vlan_stripping(pdata);
		}
	}
	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
			PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.enable_rx_vlan_filtering(pdata);
		} else {
			PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.disable_rx_vlan_filtering(pdata);
		}
	}
	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
			PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
			axgbe_vlan_extend_enable(pdata);
			/* Set global registers with default ethertype */
			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
					    RTE_ETHER_TYPE_VLAN);
			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
					    RTE_ETHER_TYPE_VLAN);
		} else {
			PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
			axgbe_vlan_extend_disable(pdata);
		}
	}
	return 0;
}

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2, mac_hfr3;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
	mac_hfr3 = AXGMAC_IOREAD(pdata, MAC_HWF3R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Hardware feature register 3 */
	hw_feat->tx_q_vlan_tag_ins = AXGMAC_GET_BITS(mac_hfr3,
						     MAC_HWF3R, CBTISEL);
	hw_feat->no_of_vlan_extn = AXGMAC_GET_BITS(mac_hfr3,
						   MAC_HWF3R, NRVF);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *  the number of Tx queues to the number of Tx channels
	 *  enabled
	 * -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *  number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

/* Used in dev_start by primary process and then
 * in dev_init by secondary process when attaching to an existing ethdev.
 */
void
axgbe_set_tx_function(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev->tx_pkt_burst = &axgbe_xmit_pkts;

	if (pdata->multi_segs_tx)
		dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
#ifdef RTE_ARCH_X86
	struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
	if (!txq->vector_disable &&
	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
#endif
}

void
axgbe_set_rx_function(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len;
	struct axgbe_port *pdata;

	pdata = dev->data->dev_private;
	max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;
	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;
}

/*
 * Initialise the axgbe ethdev at probe time. It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
	unsigned char cpu_family = 0, cpu_model = 0;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = axgbe_dev_tx_descriptor_status;

	eth_dev->tx_pkt_burst = &axgbe_xmit_pkts;
	eth_dev->rx_pkt_burst = &axgbe_recv_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		axgbe_set_tx_function(eth_dev);
		axgbe_set_rx_function(eth_dev);
		return 0;
	}

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/*
	 * Use CPUID to get Family and model ID to identify the CPU
	 */
	__cpuid(0x0, eax, ebx, ecx, edx);

	if (ebx == CPUID_VENDOR_AuthenticAMD_ebx &&
	    edx == CPUID_VENDOR_AuthenticAMD_edx &&
	    ecx == CPUID_VENDOR_AuthenticAMD_ecx) {
		int unknown_cpu = 0;
		eax = 0, ebx = 0, ecx = 0, edx = 0;

		__cpuid(0x1, eax, ebx, ecx, edx);

		cpu_family = ((GET_BITS(eax, 8, 4)) + (GET_BITS(eax, 20, 8)));
		cpu_model = ((GET_BITS(eax, 4, 4)) | (((GET_BITS(eax, 16, 4)) << 4) & 0xF0));

		switch (cpu_family) {
		case Fam17h:
			/* V1000/R1000 */
			if (cpu_model >= 0x10 && cpu_model <= 0x1F) {
				pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
				pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
			/* EPYC 3000 */
			} else if (cpu_model >= 0x01 && cpu_model <= 0x0F) {
				pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
				pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
			} else {
				unknown_cpu = 1;
			}
			break;
		case Fam19h:
			/* V3000 (Yellow Carp) */
			if (cpu_model >= 0x44 && cpu_model <= 0x47) {
				pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
				pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;

				/* Yellow Carp devices do not need cdr workaround */
				pdata->vdata->an_cdr_workaround = 0;
			} else {
				unknown_cpu = 1;
			}
			break;
		default:
			unknown_cpu = 1;
			break;
		}
		if (unknown_cpu) {
			PMD_DRV_LOG(ERR, "Unknown CPU family, no supported axgbe device found\n");
			return -ENODEV;
		}
	}

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
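	/* The window definition register packs both fields decoded below:
	 * OFFSET gives the window base in 64-byte units (hence the shift by
	 * 6) and SIZE is an exponent yielding a window of 1 << (SIZE + 7)
	 * bytes, from which the access mask is derived.
	 */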
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
axgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(pci_dev->intr_handle);
	rte_intr_callback_unregister(pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, axgbe_dev_close);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(axgbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(axgbe_logtype_driver, driver, NOTICE);