/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"
#include "rte_time.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static int axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static int axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_reset(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr,
				  uint32_t index,
				  uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
				       struct rte_ether_addr *mac_addr,
				       uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
					   uint8_t add);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
			      struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats,
				unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
			   const uint64_t *ids,
			   uint64_t *values,
			   unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_reta_entry64 *reta_conf,
				     uint16_t reta_size);
static int axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf);
static int axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf);
static int axgbe_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
					struct rte_eth_pfc_conf *pfc_conf);
static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			       struct rte_eth_rxq_info *qinfo);
static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			       struct rte_eth_txq_info *qinfo);
const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int
axgbe_timesync_enable(struct rte_eth_dev *dev);
static int
axgbe_timesync_disable(struct rte_eth_dev *dev);
static int
axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
			struct timespec *timestamp, uint32_t flags);
static int
axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
			struct timespec *timestamp);
static int
axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int
axgbe_timesync_read_time(struct rte_eth_dev *dev,
			struct timespec *timestamp);
static int
axgbe_timesync_write_time(struct rte_eth_dev *dev,
			const struct timespec *timestamp);
static void
axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
			unsigned int nsec);
static void
axgbe_update_tstamp_addend(struct axgbe_port *pdata,
			unsigned int addend);
static int
axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on);
static int axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
			enum rte_vlan_type vlan_type, uint16_t tpid);
static int axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var)				\
	{ _string,						\
	  offsetof(struct axgbe_mmc_stats, _var),		\
	}

static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID		0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A	0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B	0x1459

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.dev_reset            = axgbe_dev_reset,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set         = axgbe_dev_mac_addr_set,
	.mac_addr_add         = axgbe_dev_mac_addr_add,
	.mac_addr_remove      = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list     = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set    = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update          = axgbe_dev_link_update,
	.get_reg              = axgbe_dev_get_regs,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.xstats_get           = axgbe_dev_xstats_get,
	.xstats_reset         = axgbe_dev_xstats_reset,
	.xstats_get_names     = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
	.reta_update          = axgbe_dev_rss_reta_update,
	.reta_query           = axgbe_dev_rss_reta_query,
	.rss_hash_update      = axgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = axgbe_dev_rss_hash_conf_get,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
	.flow_ctrl_get        = axgbe_flow_ctrl_get,
	.flow_ctrl_set        = axgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
	.rxq_info_get         = axgbe_rxq_info_get,
	.txq_info_get         = axgbe_txq_info_get,
	.dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get,
	.mtu_set              = axgb_mtu_set,
	.vlan_filter_set      = axgbe_vlan_filter_set,
	.vlan_tpid_set        = axgbe_vlan_tpid_set,
	.vlan_offload_set     = axgbe_vlan_offload_set,
	.timesync_enable      = axgbe_timesync_enable,
	.timesync_disable     = axgbe_timesync_disable,
	.timesync_read_rx_timestamp = axgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = axgbe_timesync_read_tx_timestamp,
	.timesync_adjust_time = axgbe_timesync_adjust_time,
	.timesync_read_time   = axgbe_timesync_read_time,
	.timesync_write_time  = axgbe_timesync_write_time,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (rte_bit_relaxed_get32(AXGBE_STOPPED, &pdata->dev_state))
		return 0;

	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);

	return 0;
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Set Default MAC Addr */
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		       uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}

static int
axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i, idx, shift;
	int ret;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
			continue;
		pdata->rss_table[i] = reta_conf[idx].reta[shift];
	}

	/* Program the lookup table */
	ret = axgbe_write_rss_lookup_table(pdata);
	return ret;
}

static int
axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i, idx, shift;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
			continue;
		reta_conf[idx].reta[shift] = pdata->rss_table[i];
	}
	return 0;
}

static int
axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (rss_conf == NULL) {
		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
		return -EINVAL;
	}

	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len == AXGBE_RSS_HASH_KEY_SIZE) {
		rte_memcpy(pdata->rss_key, rss_conf->rss_key,
			   AXGBE_RSS_HASH_KEY_SIZE);
		/* Program the hash key */
		ret = axgbe_write_rss_hash_key(pdata);
		if (ret != 0)
			return ret;
	}

	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;

	if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	if (pdata->rss_hf &
	    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	if (pdata->rss_hf &
	    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Set the RSS options */
	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	return 0;
}

static int
axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (rss_conf == NULL) {
		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
		return -EINVAL;
	}

	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len >= AXGBE_RSS_HASH_KEY_SIZE) {
		rte_memcpy(rss_conf->rss_key, pdata->rss_key,
			   AXGBE_RSS_HASH_KEY_SIZE);
	}
	rss_conf->rss_key_len = AXGBE_RSS_HASH_KEY_SIZE;
	rss_conf->rss_hf = pdata->rss_hf;
	return 0;
}

static int
axgbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret = 0;

	ret = axgbe_dev_close(dev);
	if (ret)
		return ret;

	ret = eth_axgbe_dev_init(dev);

	return ret;
}

static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}
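/* Replace the additional MAC address table with the supplied multicast
 * list. Entry 0 always holds the default MAC address, so the remaining
 * entries are cleared first and the multicast addresses are programmed
 * starting at index 1.
 */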
static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1; /* 0 is always default mac */
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
		return -EINVAL;
	}

	/* clear unicast addresses */
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}

static int
axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);

	if (pdata->uc_hash_mac_addr > 0) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

static int
axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	for (index = 0; index < pdata->hash_table_count; index++) {
		if (add)
			pdata->uc_hash_table[index] = ~0;
		else
			pdata->uc_hash_table[index] = 0;

		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
			    add ? "set" : "clear", index);

		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
			       pdata->uc_hash_table[index]);
	}

	if (add) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}
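/* Accumulate the hardware MMC counters into the driver's 64-bit software
 * copies. The counters are frozen (MMC_CR.MCF) for the duration of the
 * read, and wide counters are assembled from their LO/HI register pairs.
 */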
static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);
	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);
	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}

static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}

static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}

static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

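	/* Refresh the hardware MMC counters before aggregating the
	 * per-queue software counters below.
	 */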
	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq) {
			stats->q_ipackets[i] = rxq->pkts;
			stats->ipackets += rxq->pkts;
			stats->q_ibytes[i] = rxq->bytes;
			stats->ibytes += rxq->bytes;
			stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
			stats->q_errors[i] = rxq->errors
				+ rxq->rx_mbuf_alloc_failed;
			stats->ierrors += rxq->errors;
		} else {
			PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
					dev->data->port_id);
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq) {
			stats->q_opackets[i] = txq->pkts;
			stats->opackets += txq->pkts;
			stats->q_obytes[i] = txq->bytes;
			stats->obytes += txq->bytes;
			stats->oerrors += txq->errors;
		} else {
			PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
					dev->data->port_id);
		}
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq) {
			rxq->pkts = 0;
			rxq->bytes = 0;
			rxq->errors = 0;
			rxq->rx_mbuf_alloc_failed = 0;
		} else {
			PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
					dev->data->port_id);
		}
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq) {
			txq->pkts = 0;
			txq->bytes = 0;
			txq->errors = 0;
		} else {
			PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
					dev->data->port_id);
		}
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}
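/* Report the current flow control settings. The RFA/RFD thresholds read
 * back from MTL_Q_RQFCR are register-encoded values; the scaling below is
 * assumed to be the inverse of the 1024-byte unit conversion applied in
 * axgbe_flow_ctrl_set().
 */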
static int
axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;
	reg_val = AXGMAC_IOREAD(pdata, reg);
	fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
	fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
	fc.autoneg = pdata->pause_autoneg;

	if (pdata->rx_pause && pdata->tx_pause)
		fc.mode = RTE_FC_FULL;
	else if (pdata->rx_pause)
		fc.mode = RTE_FC_RX_PAUSE;
	else if (pdata->tx_pause)
		fc.mode = RTE_FC_TX_PAUSE;
	else
		fc.mode = RTE_FC_NONE;

	fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
	fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
	fc_conf->pause_time = fc.pause_time[0];
	fc_conf->send_xon = fc.send_xon;
	fc_conf->mode = fc.mode;

	return 0;
}

static int
axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;

	pdata->pause_autoneg = fc_conf->autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = fc_conf->send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
	AXGMAC_IOWRITE(pdata, reg, reg_val);
	fc.mode = fc_conf->mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static int
axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
			     struct rte_eth_pfc_conf *pfc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	uint8_t tc_num;

	tc_num = pdata->pfc_map[pfc_conf->priority];

	if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
		PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n",
			     pdata->hw_feat.tc_cnt);
		return -EINVAL;
	}

	pdata->pause_autoneg = pfc_conf->fc.autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = pfc_conf->fc.send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));

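	/* The per-TC pause time fields live in MTL_TCPM0R (PSTC0-PSTC3)
	 * and MTL_TCPM1R (PSTC4-PSTC7); write the field matching the
	 * traffic class mapped to this priority.
	 */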
	switch (tc_num) {
	case 0:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC0, pfc_conf->fc.pause_time);
		break;
	case 1:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC1, pfc_conf->fc.pause_time);
		break;
	case 2:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC2, pfc_conf->fc.pause_time);
		break;
	case 3:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC3, pfc_conf->fc.pause_time);
		break;
	case 4:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC4, pfc_conf->fc.pause_time);
		break;
	case 5:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC5, pfc_conf->fc.pause_time);
		break;
	case 6:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC6, pfc_conf->fc.pause_time);
		break;
	case 7:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC7, pfc_conf->fc.pause_time);
		break;
	}

	fc.mode = pfc_conf->fc.mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);
	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

void
axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		   struct rte_eth_rxq_info *qinfo)
{
	struct axgbe_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];
	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_desc;
	qinfo->conf.rx_free_thresh = rxq->free_thresh;
}

void
axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		   struct rte_eth_txq_info *qinfo)
{
	struct axgbe_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];
	qinfo->nb_desc = txq->nb_desc;
	qinfo->conf.tx_free_thresh = txq->free_thresh;
}

const uint32_t *
axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == axgbe_recv_pkts)
		return ptypes;
	return NULL;
}
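/* Update the MTU. The resulting frame size (MTU plus Ethernet header and
 * CRC) must fit within max_rx_pktlen and the port must be stopped; the
 * MAC jumbo-enable (JE) bit is set or cleared based on the new frame size.
 */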
static int
axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	struct axgbe_port *pdata = dev->data->dev_private;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	unsigned int val = 0;

	axgbe_dev_info_get(dev, &dev_info);
	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;
	/* mtu setting is forbidden if port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
			    dev->data->port_id);
		return -EBUSY;
	}
	if (frame_size > RTE_ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		val = 1;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		val = 0;
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	return 0;
}

static void
axgbe_update_tstamp_time(struct axgbe_port *pdata,
			 unsigned int sec, unsigned int nsec, int addsub)
{
	unsigned int count = 100;
	uint32_t sub_val = 0;
	uint32_t sub_val_sec = 0xFFFFFFFF;
	uint32_t sub_val_nsec = 0x3B9ACA00;

	if (addsub) {
		if (sec)
			sub_val = sub_val_sec - (sec - 1);
		else
			sub_val = sec;

		AXGMAC_IOWRITE(pdata, MAC_STSUR, sub_val);
		sub_val = sub_val_nsec - nsec;
		AXGMAC_IOWRITE(pdata, MAC_STNUR, sub_val);
		AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 1);
	} else {
		AXGMAC_IOWRITE(pdata, MAC_STSUR, sec);
		AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 0);
		AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1);
	/* Wait for time update to complete */
	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
		rte_delay_ms(1);
}

static inline uint64_t
div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

static inline uint64_t
div_u64(uint64_t dividend, uint32_t divisor)
{
	uint32_t remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}

static int
axgbe_adjfreq(struct axgbe_port *pdata, int64_t delta)
{
	uint64_t adjust;
	uint32_t addend, diff;
	unsigned int neg_adjust = 0;

	if (delta < 0) {
		neg_adjust = 1;
		delta = -delta;
	}
	adjust = (uint64_t)pdata->tstamp_addend;
	adjust *= delta;
	diff = (uint32_t)div_u64(adjust, 1000000000UL);
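	/* diff = tstamp_addend * |delta| / 1e9; the addend is then
	 * raised or lowered by diff depending on the sign of delta.
	 */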
	addend = (neg_adjust) ? pdata->tstamp_addend - diff :
				pdata->tstamp_addend + diff;
	pdata->tstamp_addend = addend;
	axgbe_update_tstamp_addend(pdata, addend);
	return 0;
}

static int
axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct timespec timestamp_delta;

	axgbe_adjfreq(pdata, delta);
	pdata->systime_tc.nsec += delta;

	if (delta < 0) {
		delta = -delta;
		timestamp_delta = rte_ns_to_timespec(delta);
		axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec,
					 timestamp_delta.tv_nsec, 1);
	} else {
		timestamp_delta = rte_ns_to_timespec(delta);
		axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec,
					 timestamp_delta.tv_nsec, 0);
	}
	return 0;
}

static int
axgbe_timesync_read_time(struct rte_eth_dev *dev,
			 struct timespec *timestamp)
{
	uint64_t nsec;
	struct axgbe_port *pdata = dev->data->dev_private;

	nsec = AXGMAC_IOREAD(pdata, MAC_STSR);
	nsec *= NSEC_PER_SEC;
	nsec += AXGMAC_IOREAD(pdata, MAC_STNR);
	*timestamp = rte_ns_to_timespec(nsec);
	return 0;
}

static int
axgbe_timesync_write_time(struct rte_eth_dev *dev,
			  const struct timespec *timestamp)
{
	unsigned int count = 100;
	struct axgbe_port *pdata = dev->data->dev_private;

	AXGMAC_IOWRITE(pdata, MAC_STSUR, timestamp->tv_sec);
	AXGMAC_IOWRITE(pdata, MAC_STNUR, timestamp->tv_nsec);
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1);
	/* Wait for time update to complete */
	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
		rte_delay_ms(1);
	if (!count)
		PMD_DRV_LOG(ERR, "Timed out update timestamp\n");
	return 0;
}

static void
axgbe_update_tstamp_addend(struct axgbe_port *pdata,
			   uint32_t addend)
{
	unsigned int count = 100;

	AXGMAC_IOWRITE(pdata, MAC_TSAR, addend);
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

	/* Wait for addend update to complete */
	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
		rte_delay_ms(1);
	if (!count)
		PMD_DRV_LOG(ERR,
			    "Timed out updating timestamp addend register\n");
}

static void
axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
		      unsigned int nsec)
{
	unsigned int count = 100;

	/* System Time Sec Update */
	AXGMAC_IOWRITE(pdata, MAC_STSUR, sec);
	/* System Time nanoSec Update */
	AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	/* Initialize Timestamp */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

	/* Wait for time update to complete */
	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
		rte_delay_ms(1);
	if (!count)
		PMD_DRV_LOG(ERR, "Timed out initializing timestamp\n");
}

static int
axgbe_timesync_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int mac_tscr = 0;
	uint64_t dividend;
	struct timespec timestamp;
	uint64_t nsec;

	/* Set one nano-second accuracy */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

	/* Set fine timestamp update */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

	/* Overwrite earlier timestamps */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

	AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Enabling processing of ptp over eth pkt */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* Enable timestamp for all pkts */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);

	/* enabling timestamp */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
	AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Exit if timestamping is not enabled */
	if (!AXGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) {
		PMD_DRV_LOG(ERR, "Exiting as timestamp is not enabled\n");
		return 0;
	}

	/* Sub-second Increment Value */
	AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, AXGBE_TSTAMP_SSINC);
	/* Sub-nanosecond Increment Value */
	AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, AXGBE_TSTAMP_SNSINC);

	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
	dividend = 50000000;
	dividend <<= 32;
	pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);

	axgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
	axgbe_set_tstamp_time(pdata, 0, 0);

	/* Initialize the timecounter */
	memset(&pdata->systime_tc, 0, sizeof(struct rte_timecounter));

	pdata->systime_tc.cc_mask = AXGBE_CYCLECOUNTER_MASK;
	pdata->systime_tc.cc_shift = 0;
	pdata->systime_tc.nsec_mask = 0;

	PMD_DRV_LOG(DEBUG, "Initializing system time counter with realtime\n");

	/* Updating the counter once with clock real time */
	clock_gettime(CLOCK_REALTIME, &timestamp);
	nsec = rte_timespec_to_ns(&timestamp);
	nsec = rte_timecounter_update(&pdata->systime_tc, nsec);
	axgbe_set_tstamp_time(pdata, timestamp.tv_sec, timestamp.tv_nsec);
	return 0;
}

static int
axgbe_timesync_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int mac_tscr = 0;

	/* disable timestamp for all pkts */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 0);
	/* disable the addend register */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 0);
	/* disable timestamp update */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 0);
	/* disable time stamp */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 0);
	return 0;
}

static int
axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				 struct timespec *timestamp, uint32_t flags)
{
	uint64_t nsec = 0;
	volatile union axgbe_rx_desc *desc;
	uint16_t idx, pmt;
	struct axgbe_rx_queue *rxq = *dev->data->rx_queues;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	desc = &rxq->desc[idx];

	while (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
		rte_delay_ms(1);
	if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CTXT)) {
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_CONTEXT_DESC3, TSA) &&
		    !AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_CONTEXT_DESC3, TSD)) {
			pmt = AXGMAC_GET_BITS_LE(desc->write.desc3,
						 RX_CONTEXT_DESC3, PMT);
			nsec = rte_le_to_cpu_32(desc->write.desc1);
			nsec *= NSEC_PER_SEC;
			nsec += rte_le_to_cpu_32(desc->write.desc0);
			if (nsec != 0xffffffffffffffffULL) {
				if (pmt == 0x01)
					*timestamp = rte_ns_to_timespec(nsec);
				PMD_DRV_LOG(DEBUG,
					"flags = 0x%x nsec = %"PRIu64"\n",
					flags, nsec);
			}
		}
	}

	return 0;
}
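/* Read the latest Tx timestamp from MAC_TXSSR/MAC_TXSNR. Parts flagged
 * with tx_tstamp_workaround read the nanosecond register before the
 * seconds register; otherwise the registers are read in the usual order.
 */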
static int
axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
				 struct timespec *timestamp)
{
	uint64_t nsec;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int tx_snr, tx_ssr;

	rte_delay_us(5);
	if (pdata->vdata->tx_tstamp_workaround) {
		tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR);
		tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR);
	} else {
		tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR);
		tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR);
	}
	if (AXGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) {
		PMD_DRV_LOG(DEBUG, "Waiting for TXTSSTSMIS\n");
		return 0;
	}
	nsec = tx_ssr;
	nsec *= NSEC_PER_SEC;
	nsec += tx_snr;
	PMD_DRV_LOG(DEBUG, "nsec = %"PRIu64" tx_ssr = %d tx_snr = %d\n",
		    nsec, tx_ssr, tx_snr);
	*timestamp = rte_ns_to_timespec(nsec);
	return 0;
}

static int
axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned long vid_bit, vid_idx;

	vid_bit = VLAN_TABLE_BIT(vid);
	vid_idx = VLAN_TABLE_IDX(vid);

	if (on) {
		PMD_DRV_LOG(DEBUG, "Set VLAN vid=%d for device = %s\n",
			    vid, pdata->eth_dev->device->name);
		pdata->active_vlans[vid_idx] |= vid_bit;
	} else {
		PMD_DRV_LOG(DEBUG, "Reset VLAN vid=%d for device = %s\n",
			    vid, pdata->eth_dev->device->name);
		pdata->active_vlans[vid_idx] &= ~vid_bit;
	}
	pdata->hw_if.update_vlan_hash_table(pdata);
	return 0;
}

static int
axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	uint32_t reg = 0;
	uint32_t qinq = 0;

	qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
	PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
		if (qinq) {
			if (tpid != 0x8100 && tpid != 0x88a8)
				PMD_DRV_LOG(ERR,
					    "tag supported 0x8100/0x88A8\n");
			PMD_DRV_LOG(DEBUG, "qinq with inner tag\n");

			/* Enable Inner VLAN Tag */
			AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 1);
			reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
			PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);
		} else {
			PMD_DRV_LOG(ERR,
				    "Inner type not supported in single tag\n");
		}
		break;
	case ETH_VLAN_TYPE_OUTER:
		PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
		if (qinq) {
			PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
			/* Enable outer VLAN tag */
			AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 0);
			reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
			PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);

			AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 1);
			reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANIR, CSVL);
			PMD_DRV_LOG(DEBUG, "bit CSVL = 0x%x\n", reg);
		} else {
			if (tpid != 0x8100 && tpid != 0x88a8)
				PMD_DRV_LOG(ERR,
					    "tag supported 0x8100/0x88A8\n");
		}
		break;
	case ETH_VLAN_TYPE_MAX:
		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
		break;
	case ETH_VLAN_TYPE_UNKNOWN:
		PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
		break;
	}
	return 0;
}

static void axgbe_vlan_extend_enable(struct axgbe_port *pdata)
{
	int qinq = 0;

	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 1);
	qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
	PMD_DRV_LOG(DEBUG, "vlan double tag enabled EDVLP:qinq=0x%x\n", qinq);
}

static void axgbe_vlan_extend_enable(struct axgbe_port *pdata)
{
	int qinq = 0;

	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 1);
	qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
	PMD_DRV_LOG(DEBUG, "vlan double tag enabled EDVLP:qinq=0x%x\n", qinq);
}

static void axgbe_vlan_extend_disable(struct axgbe_port *pdata)
{
	int qinq = 0;

	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 0);
	qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
	PMD_DRV_LOG(DEBUG, "vlan double tag disabled EDVLP:qinq=0x%x\n", qinq);
}

static int
axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
			PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.enable_rx_vlan_stripping(pdata);
		} else {
			PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.disable_rx_vlan_stripping(pdata);
		}
	}
	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
			PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.enable_rx_vlan_filtering(pdata);
		} else {
			PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.disable_rx_vlan_filtering(pdata);
		}
	}
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
			PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
			axgbe_vlan_extend_enable(pdata);
			/* Set global registers with default ethertype */
			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
					    RTE_ETHER_TYPE_VLAN);
			axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
					    RTE_ETHER_TYPE_VLAN);
		} else {
			PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
			axgbe_vlan_extend_disable(pdata);
		}
	}
	return 0;
}
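
/*
 * Illustrative usage sketch (not part of the driver): applications toggle the
 * offloads handled above through rte_eth_dev_set_vlan_offload(), after
 * setting the corresponding DEV_RX_OFFLOAD_VLAN_* bits in the port's rxmode
 * configuration, e.g.:
 *
 *	int mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
 *
 *	rte_eth_dev_set_vlan_offload(port_id, mask);
 */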

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2, mac_hfr3;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
	mac_hfr3 = AXGMAC_IOREAD(pdata, MAC_HWF3R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Hardware feature register 3 */
	hw_feat->tx_q_vlan_tag_ins = AXGMAC_GET_BITS(mac_hfr3,
						     MAC_HWF3R, CBTISEL);
	hw_feat->no_of_vlan_extn = AXGMAC_GET_BITS(mac_hfr3,
						   MAC_HWF3R, NRVF);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}
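
/*
 * Worked example of the translations above (values are hypothetical): a raw
 * HASHTBLSZ field of 3 decodes to a 256-entry hash table, a raw RXFIFOSIZE
 * field of 7 decodes to 1 << (7 + 7) = 16384 bytes of Rx FIFO, and a raw
 * RXCHCNT of 3 means 4 Rx DMA channels once the zero-based count has been
 * incremented.
 */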

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}
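
/*
 * Worked example for axgbe_set_counts() (hypothetical values): with
 * hw_feat.tx_ch_cnt = 8, tx_max_channel_count = 4 and tx_max_q_count = 8,
 * the driver ends up with tx_ring_count = min(8, 4) = 4 and tx_q_count = 4;
 * on the Rx side the queue count is taken from min(hw_feat.rx_q_cnt,
 * rx_max_q_count) independently of the Rx channel count.
 */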

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}

/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = axgbe_dev_tx_descriptor_status;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
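	/*
	 * Worked example of the window maths above (hypothetical register
	 * value): a PCS_V2_WINDOW_DEF SIZE field of 2 gives an indirect
	 * access window of 1 << (2 + 7) = 512 bytes with a mask of 0x1ff,
	 * while the OFFSET field shifted left by 6 yields the window base.
	 */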
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
axgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, axgbe_dev_close);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER(axgbe_logtype_init, pmd.net.axgbe.init, NOTICE);
RTE_LOG_REGISTER(axgbe_logtype_driver, pmd.net.axgbe.driver, NOTICE);
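
/*
 * Usage note (illustrative): the two log types registered above default to
 * NOTICE and can be raised at runtime with the standard EAL option, e.g.
 * "--log-level=pmd.net.axgbe.driver:debug". The device must be bound to one
 * of the modules named in the KMOD_DEP list (for example vfio-pci) before
 * this PMD can probe it.
 */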