xref: /dpdk/drivers/net/axgbe/axgbe_ethdev.c (revision b58d8781fa1fa573f9d5e9af81a4288fddf1e0a8)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3  *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
4  */
5 
6 #include "axgbe_rxtx.h"
7 #include "axgbe_ethdev.h"
8 #include "axgbe_common.h"
9 #include "axgbe_phy.h"
10 #include "axgbe_regs.h"
11 
12 static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
13 static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
14 static int  axgbe_dev_configure(struct rte_eth_dev *dev);
15 static int  axgbe_dev_start(struct rte_eth_dev *dev);
16 static void axgbe_dev_stop(struct rte_eth_dev *dev);
17 static void axgbe_dev_interrupt_handler(void *param);
18 static void axgbe_dev_close(struct rte_eth_dev *dev);
19 static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
20 static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
21 static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
22 static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
23 static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
24 				  struct rte_ether_addr *mac_addr);
25 static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
26 				  struct rte_ether_addr *mac_addr,
27 				  uint32_t index,
28 				  uint32_t vmdq);
29 static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
30 static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
31 				      struct rte_ether_addr *mc_addr_set,
32 				      uint32_t nb_mc_addr);
33 static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
34 				       struct rte_ether_addr *mac_addr,
35 				       uint8_t add);
36 static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
37 					   uint8_t add);
38 static int axgbe_dev_link_update(struct rte_eth_dev *dev,
39 				 int wait_to_complete);
40 static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
41 			      struct rte_dev_reg_info *regs);
42 static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
43 				struct rte_eth_stats *stats);
44 static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
45 static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
46 				struct rte_eth_xstat *stats,
47 				unsigned int n);
48 static int
49 axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
50 			   struct rte_eth_xstat_name *xstats_names,
51 			   unsigned int size);
52 static int
53 axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
54 			   const uint64_t *ids,
55 			   uint64_t *values,
56 			   unsigned int n);
57 static int
58 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
59 				 struct rte_eth_xstat_name *xstats_names,
60 				 const uint64_t *ids,
61 				 unsigned int size);
62 static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
63 static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
64 			  struct rte_eth_rss_reta_entry64 *reta_conf,
65 			  uint16_t reta_size);
66 static int axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
67 			 struct rte_eth_rss_reta_entry64 *reta_conf,
68 			 uint16_t reta_size);
69 static int axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
70 				     struct rte_eth_rss_conf *rss_conf);
71 static int axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
72 				       struct rte_eth_rss_conf *rss_conf);
73 static int  axgbe_dev_info_get(struct rte_eth_dev *dev,
74 			       struct rte_eth_dev_info *dev_info);
75 static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
76 				struct rte_eth_fc_conf *fc_conf);
77 static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
78 				struct rte_eth_fc_conf *fc_conf);
79 static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
80 				struct rte_eth_pfc_conf *pfc_conf);
81 static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
82 	struct rte_eth_rxq_info *qinfo);
83 static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
84 	struct rte_eth_txq_info *qinfo);
85 const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
86 static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
87 
88 struct axgbe_xstats {
89 	char name[RTE_ETH_XSTATS_NAME_SIZE];
90 	int offset;
91 };
92 
93 #define AXGMAC_MMC_STAT(_string, _var)                           \
94 	{ _string,                                              \
95 	  offsetof(struct axgbe_mmc_stats, _var),       \
96 	}
97 
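/*
 * Illustrative expansion: AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb)
 * becomes { "tx_bytes", offsetof(struct axgbe_mmc_stats, txoctetcount_gb) },
 * pairing the xstat name reported to applications with the byte offset of
 * the corresponding counter inside struct axgbe_mmc_stats.
 */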
98 static const struct axgbe_xstats axgbe_xstats_strings[] = {
99 	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
100 	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
101 	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
102 	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
103 	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
104 	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
105 	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
106 	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
107 	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
108 	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
109 	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
110 	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
111 	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
112 	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
113 
114 	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
115 	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
116 	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
117 	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
118 	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
119 	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
120 	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
121 	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
122 	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
123 	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
124 	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
125 	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
126 	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
127 	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
128 	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
129 	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
130 	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
131 	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
132 	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
133 	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
134 	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
135 	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
136 };
137 
138 #define AXGBE_XSTATS_COUNT        ARRAY_SIZE(axgbe_xstats_strings)
139 
140 /* The set of PCI devices this driver supports */
141 #define AMD_PCI_VENDOR_ID       0x1022
142 #define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
143 #define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
144 #define AMD_PCI_AXGBE_DEVICE_V2B 0x1459
145 
146 int axgbe_logtype_init;
147 int axgbe_logtype_driver;
148 
149 static const struct rte_pci_id pci_id_axgbe_map[] = {
150 	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
151 	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
152 	{ .vendor_id = 0, },
153 };
154 
155 static struct axgbe_version_data axgbe_v2a = {
156 	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
157 	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
158 	.mmc_64bit			= 1,
159 	.tx_max_fifo_size		= 229376,
160 	.rx_max_fifo_size		= 229376,
161 	.tx_tstamp_workaround		= 1,
162 	.ecc_support			= 1,
163 	.i2c_support			= 1,
164 	.an_cdr_workaround		= 1,
165 };
166 
167 static struct axgbe_version_data axgbe_v2b = {
168 	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
169 	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
170 	.mmc_64bit			= 1,
171 	.tx_max_fifo_size		= 65536,
172 	.rx_max_fifo_size		= 65536,
173 	.tx_tstamp_workaround		= 1,
174 	.ecc_support			= 1,
175 	.i2c_support			= 1,
176 	.an_cdr_workaround		= 1,
177 };
178 
179 static const struct rte_eth_desc_lim rx_desc_lim = {
180 	.nb_max = AXGBE_MAX_RING_DESC,
181 	.nb_min = AXGBE_MIN_RING_DESC,
182 	.nb_align = 8,
183 };
184 
185 static const struct rte_eth_desc_lim tx_desc_lim = {
186 	.nb_max = AXGBE_MAX_RING_DESC,
187 	.nb_min = AXGBE_MIN_RING_DESC,
188 	.nb_align = 8,
189 };
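/*
 * The ethdev layer checks queue sizes against these limits: nb_desc passed
 * to rte_eth_rx/tx_queue_setup() must lie between AXGBE_MIN_RING_DESC and
 * AXGBE_MAX_RING_DESC and be a multiple of nb_align (8), otherwise the
 * setup call is rejected before this driver is ever invoked.
 */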
190 
191 static const struct eth_dev_ops axgbe_eth_dev_ops = {
192 	.dev_configure        = axgbe_dev_configure,
193 	.dev_start            = axgbe_dev_start,
194 	.dev_stop             = axgbe_dev_stop,
195 	.dev_close            = axgbe_dev_close,
196 	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
197 	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
198 	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
199 	.allmulticast_disable = axgbe_dev_allmulticast_disable,
200 	.mac_addr_set         = axgbe_dev_mac_addr_set,
201 	.mac_addr_add         = axgbe_dev_mac_addr_add,
202 	.mac_addr_remove      = axgbe_dev_mac_addr_remove,
203 	.set_mc_addr_list     = axgbe_dev_set_mc_addr_list,
204 	.uc_hash_table_set    = axgbe_dev_uc_hash_table_set,
205 	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
206 	.link_update          = axgbe_dev_link_update,
207 	.get_reg	      = axgbe_dev_get_regs,
208 	.stats_get            = axgbe_dev_stats_get,
209 	.stats_reset          = axgbe_dev_stats_reset,
210 	.xstats_get	      = axgbe_dev_xstats_get,
211 	.xstats_reset	      = axgbe_dev_xstats_reset,
212 	.xstats_get_names     = axgbe_dev_xstats_get_names,
213 	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
214 	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
215 	.reta_update          = axgbe_dev_rss_reta_update,
216 	.reta_query           = axgbe_dev_rss_reta_query,
217 	.rss_hash_update      = axgbe_dev_rss_hash_update,
218 	.rss_hash_conf_get    = axgbe_dev_rss_hash_conf_get,
219 	.dev_infos_get        = axgbe_dev_info_get,
220 	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
221 	.rx_queue_release     = axgbe_dev_rx_queue_release,
222 	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
223 	.tx_queue_release     = axgbe_dev_tx_queue_release,
224 	.flow_ctrl_get        = axgbe_flow_ctrl_get,
225 	.flow_ctrl_set        = axgbe_flow_ctrl_set,
226 	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
227 	.rxq_info_get                 = axgbe_rxq_info_get,
228 	.txq_info_get                 = axgbe_txq_info_get,
229 	.dev_supported_ptypes_get     = axgbe_dev_supported_ptypes_get,
230 	.rx_descriptor_status         = axgbe_dev_rx_descriptor_status,
231 	.tx_descriptor_status         = axgbe_dev_tx_descriptor_status,
232 	.mtu_set		= axgb_mtu_set,
233 };
234 
235 static int axgbe_phy_reset(struct axgbe_port *pdata)
236 {
237 	pdata->phy_link = -1;
238 	pdata->phy_speed = SPEED_UNKNOWN;
239 	return pdata->phy_if.phy_reset(pdata);
240 }
241 
242 /*
243  * Interrupt handler triggered by the NIC for handling
244  * specific interrupts.
245  *
246  * @param handle
247  *  Pointer to interrupt handle.
248  * @param param
249  *  The address of the parameter (struct rte_eth_dev *) registered before.
250  *
251  * @return
252  *  void
253  */
254 static void
255 axgbe_dev_interrupt_handler(void *param)
256 {
257 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
258 	struct axgbe_port *pdata = dev->data->dev_private;
259 	unsigned int dma_isr, dma_ch_isr;
260 
261 	pdata->phy_if.an_isr(pdata);
262 	/* DMA related interrupts */
263 	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
264 	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
265 	if (dma_isr) {
266 		if (dma_isr & 1) {
267 			dma_ch_isr =
268 				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
269 						  pdata->rx_queues[0],
270 						  DMA_CH_SR);
271 			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
272 			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
273 					   pdata->rx_queues[0],
274 					   DMA_CH_SR, dma_ch_isr);
275 		}
276 	}
277 	/* Unmask interrupts since they are disabled after generation */
278 	rte_intr_ack(&pdata->pci_dev->intr_handle);
279 }
280 
281 /*
282  * Configure device link speed and setup link.
283  * It returns 0 on success.
284  */
285 static int
286 axgbe_dev_configure(struct rte_eth_dev *dev)
287 {
288 	struct axgbe_port *pdata =  dev->data->dev_private;
289 	/* Checksum offload to hardware */
290 	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
291 				DEV_RX_OFFLOAD_CHECKSUM;
292 	return 0;
293 }
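/*
 * Note: DEV_RX_OFFLOAD_CHECKSUM is defined in rte_ethdev.h as the OR of
 * DEV_RX_OFFLOAD_IPV4_CKSUM, DEV_RX_OFFLOAD_UDP_CKSUM and
 * DEV_RX_OFFLOAD_TCP_CKSUM, so rx_csum_enable above is non-zero whenever
 * any of those Rx checksum offloads is requested by the application.
 */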
294 
295 static int
296 axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
297 {
298 	struct axgbe_port *pdata = dev->data->dev_private;
299 
300 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
301 		pdata->rss_enable = 1;
302 	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
303 		pdata->rss_enable = 0;
304 	else
305 		return  -1;
306 	return 0;
307 }
308 
309 static int
310 axgbe_dev_start(struct rte_eth_dev *dev)
311 {
312 	struct axgbe_port *pdata = dev->data->dev_private;
313 	int ret;
314 	struct rte_eth_dev_data *dev_data = dev->data;
315 	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
316 
317 	dev->dev_ops = &axgbe_eth_dev_ops;
318 
319 	PMD_INIT_FUNC_TRACE();
320 
321 	/* Multiqueue RSS */
322 	ret = axgbe_dev_rx_mq_config(dev);
323 	if (ret) {
324 		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
325 		return ret;
326 	}
327 	ret = axgbe_phy_reset(pdata);
328 	if (ret) {
329 		PMD_DRV_LOG(ERR, "phy reset failed\n");
330 		return ret;
331 	}
332 	ret = pdata->hw_if.init(pdata);
333 	if (ret) {
334 		PMD_DRV_LOG(ERR, "dev_init failed\n");
335 		return ret;
336 	}
337 
338 	/* enable uio/vfio intr/eventfd mapping */
339 	rte_intr_enable(&pdata->pci_dev->intr_handle);
340 
341 	/* phy start */
342 	pdata->phy_if.phy_start(pdata);
343 	axgbe_dev_enable_tx(dev);
344 	axgbe_dev_enable_rx(dev);
345 
346 	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
347 	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
348 	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
349 				max_pkt_len > pdata->rx_buf_size)
350 		dev_data->scattered_rx = 1;
351 
352 	/* Scatter Rx handling */
353 	if (dev_data->scattered_rx)
354 		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
355 	else
356 		dev->rx_pkt_burst = &axgbe_recv_pkts;
357 
358 	return 0;
359 }
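/*
 * Illustrative application-side sequence that ends up in axgbe_dev_start()
 * (port_id, port_conf and mbuf_pool are application-provided values):
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL,
 *			       mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 *
 * rte_eth_dev_start() dispatches to this PMD through axgbe_eth_dev_ops.
 */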
360 
361 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
362 static void
363 axgbe_dev_stop(struct rte_eth_dev *dev)
364 {
365 	struct axgbe_port *pdata = dev->data->dev_private;
366 
367 	PMD_INIT_FUNC_TRACE();
368 
369 	rte_intr_disable(&pdata->pci_dev->intr_handle);
370 
371 	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
372 		return;
373 
374 	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
375 	axgbe_dev_disable_tx(dev);
376 	axgbe_dev_disable_rx(dev);
377 
378 	pdata->phy_if.phy_stop(pdata);
379 	pdata->hw_if.exit(pdata);
380 	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
381 	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
382 }
383 
384 /* Clear all resources like TX/RX queues. */
385 static void
386 axgbe_dev_close(struct rte_eth_dev *dev)
387 {
388 	axgbe_dev_clear_queues(dev);
389 }
390 
391 static int
392 axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
393 {
394 	struct axgbe_port *pdata = dev->data->dev_private;
395 
396 	PMD_INIT_FUNC_TRACE();
397 
398 	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
399 
400 	return 0;
401 }
402 
403 static int
404 axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
405 {
406 	struct axgbe_port *pdata = dev->data->dev_private;
407 
408 	PMD_INIT_FUNC_TRACE();
409 
410 	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
411 
412 	return 0;
413 }
414 
415 static int
416 axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
417 {
418 	struct axgbe_port *pdata = dev->data->dev_private;
419 
420 	PMD_INIT_FUNC_TRACE();
421 
422 	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
423 		return 0;
424 	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
425 
426 	return 0;
427 }
428 
429 static int
430 axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
431 {
432 	struct axgbe_port *pdata = dev->data->dev_private;
433 
434 	PMD_INIT_FUNC_TRACE();
435 
436 	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
437 		return 0;
438 	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
439 
440 	return 0;
441 }
442 
443 static int
444 axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
445 {
446 	struct axgbe_port *pdata = dev->data->dev_private;
447 
448 	/* Set Default MAC Addr */
449 	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);
450 
451 	return 0;
452 }
453 
454 static int
455 axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
456 			      uint32_t index, uint32_t pool __rte_unused)
457 {
458 	struct axgbe_port *pdata = dev->data->dev_private;
459 	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
460 
461 	if (index > hw_feat->addn_mac) {
462 		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
463 		return -EINVAL;
464 	}
465 	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
466 	return 0;
467 }
468 
469 static int
470 axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
471 			  struct rte_eth_rss_reta_entry64 *reta_conf,
472 			  uint16_t reta_size)
473 {
474 	struct axgbe_port *pdata = dev->data->dev_private;
475 	unsigned int i, idx, shift;
476 	int ret;
477 
478 	if (!pdata->rss_enable) {
479 		PMD_DRV_LOG(ERR, "RSS not enabled\n");
480 		return -ENOTSUP;
481 	}
482 
483 	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
484 		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
485 		return -EINVAL;
486 	}
487 
488 	for (i = 0; i < reta_size; i++) {
489 		idx = i / RTE_RETA_GROUP_SIZE;
490 		shift = i % RTE_RETA_GROUP_SIZE;
491 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
492 			continue;
493 		pdata->rss_table[i] = reta_conf[idx].reta[shift];
494 	}
495 
496 	/* Program the lookup table */
497 	ret = axgbe_write_rss_lookup_table(pdata);
498 	return ret;
499 }
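/*
 * Worked example of the RETA indexing above: with RTE_RETA_GROUP_SIZE of 64,
 * RETA entry 70 maps to reta_conf[1] (idx = 70 / 64), slot 6 (shift =
 * 70 % 64), and is copied into pdata->rss_table[70] only when bit 6 of
 * reta_conf[1].mask is set.
 */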
500 
501 static int
502 axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
503 			 struct rte_eth_rss_reta_entry64 *reta_conf,
504 			 uint16_t reta_size)
505 {
506 	struct axgbe_port *pdata = dev->data->dev_private;
507 	unsigned int i, idx, shift;
508 
509 	if (!pdata->rss_enable) {
510 		PMD_DRV_LOG(ERR, "RSS not enabled\n");
511 		return -ENOTSUP;
512 	}
513 
514 	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
515 		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
516 		return -EINVAL;
517 	}
518 
519 	for (i = 0; i < reta_size; i++) {
520 		idx = i / RTE_RETA_GROUP_SIZE;
521 		shift = i % RTE_RETA_GROUP_SIZE;
522 		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
523 			continue;
524 		reta_conf[idx].reta[shift] = pdata->rss_table[i];
525 	}
526 	return 0;
527 }
528 
529 static int
530 axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
531 			  struct rte_eth_rss_conf *rss_conf)
532 {
533 	struct axgbe_port *pdata = dev->data->dev_private;
534 	int ret;
535 
536 	if (!pdata->rss_enable) {
537 		PMD_DRV_LOG(ERR, "RSS not enabled\n");
538 		return -ENOTSUP;
539 	}
540 
541 	if (rss_conf == NULL) {
542 		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
543 		return -EINVAL;
544 	}
545 
546 	if (rss_conf->rss_key != NULL &&
547 	    rss_conf->rss_key_len == AXGBE_RSS_HASH_KEY_SIZE) {
548 		rte_memcpy(pdata->rss_key, rss_conf->rss_key,
549 		       AXGBE_RSS_HASH_KEY_SIZE);
550 		/* Program the hash key */
551 		ret = axgbe_write_rss_hash_key(pdata);
552 		if (ret != 0)
553 			return ret;
554 	}
555 
556 	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
557 
558 	if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
559 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
560 	if (pdata->rss_hf &
561 	    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
562 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
563 	if (pdata->rss_hf &
564 	    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
565 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
566 
567 	/* Set the RSS options */
568 	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
569 
570 	return 0;
571 }
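/*
 * Illustrative application-side call into the path above (key_bytes is an
 * application-provided buffer of exactly AXGBE_RSS_HASH_KEY_SIZE bytes; a
 * key of any other length is silently skipped by this driver):
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key_bytes,
 *		.rss_key_len = AXGBE_RSS_HASH_KEY_SIZE,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
 *	};
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */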
572 
573 static int
574 axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
575 			    struct rte_eth_rss_conf *rss_conf)
576 {
577 	struct axgbe_port *pdata = dev->data->dev_private;
578 
579 	if (!pdata->rss_enable) {
580 		PMD_DRV_LOG(ERR, "RSS not enabled\n");
581 		return -ENOTSUP;
582 	}
583 
584 	if (rss_conf == NULL) {
585 		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
586 		return -EINVAL;
587 	}
588 
589 	if (rss_conf->rss_key != NULL &&
590 	    rss_conf->rss_key_len >= AXGBE_RSS_HASH_KEY_SIZE) {
591 		rte_memcpy(rss_conf->rss_key, pdata->rss_key,
592 		       AXGBE_RSS_HASH_KEY_SIZE);
593 	}
594 	rss_conf->rss_key_len = AXGBE_RSS_HASH_KEY_SIZE;
595 	rss_conf->rss_hf = pdata->rss_hf;
596 	return 0;
597 }
598 
599 static void
600 axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
601 {
602 	struct axgbe_port *pdata = dev->data->dev_private;
603 	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
604 
605 	if (index > hw_feat->addn_mac) {
606 		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
607 		return;
608 	}
609 	axgbe_set_mac_addn_addr(pdata, NULL, index);
610 }
611 
612 static int
613 axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
614 				      struct rte_ether_addr *mc_addr_set,
615 				      uint32_t nb_mc_addr)
616 {
617 	struct axgbe_port *pdata = dev->data->dev_private;
618 	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
619 	uint32_t index = 1; /* 0 is always default mac */
620 	uint32_t i;
621 
622 	if (nb_mc_addr > hw_feat->addn_mac) {
623 		PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
624 		return -EINVAL;
625 	}
626 
627 	/* clear unicast addresses */
628 	for (i = 1; i < hw_feat->addn_mac; i++) {
629 		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
630 			continue;
631 		memset(&dev->data->mac_addrs[i], 0,
632 		       sizeof(struct rte_ether_addr));
633 	}
634 
635 	while (nb_mc_addr--)
636 		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);
637 
638 	return 0;
639 }
640 
641 static int
642 axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
643 			    struct rte_ether_addr *mac_addr, uint8_t add)
644 {
645 	struct axgbe_port *pdata = dev->data->dev_private;
646 	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
647 
648 	if (!hw_feat->hash_table_size) {
649 		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
650 		return -ENOTSUP;
651 	}
652 
653 	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);
654 
655 	if (pdata->uc_hash_mac_addr > 0) {
656 		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
657 		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
658 	} else {
659 		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
660 		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
661 	}
662 	return 0;
663 }
664 
665 static int
666 axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
667 {
668 	struct axgbe_port *pdata = dev->data->dev_private;
669 	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
670 	uint32_t index;
671 
672 	if (!hw_feat->hash_table_size) {
673 		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
674 		return -ENOTSUP;
675 	}
676 
677 	for (index = 0; index < pdata->hash_table_count; index++) {
678 		if (add)
679 			pdata->uc_hash_table[index] = ~0;
680 		else
681 			pdata->uc_hash_table[index] = 0;
682 
683 		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
684 			    add ? "set" : "clear", index);
685 
686 		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
687 			       pdata->uc_hash_table[index]);
688 	}
689 
690 	if (add) {
691 		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
692 		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
693 	} else {
694 		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
695 		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
696 	}
697 	return 0;
698 }
699 
700 /* return 0 means link status changed, -1 means not changed */
701 static int
702 axgbe_dev_link_update(struct rte_eth_dev *dev,
703 		      int wait_to_complete __rte_unused)
704 {
705 	struct axgbe_port *pdata = dev->data->dev_private;
706 	struct rte_eth_link link;
707 	int ret = 0;
708 
709 	PMD_INIT_FUNC_TRACE();
710 	rte_delay_ms(800);
711 
712 	pdata->phy_if.phy_status(pdata);
713 
714 	memset(&link, 0, sizeof(struct rte_eth_link));
715 	link.link_duplex = pdata->phy.duplex;
716 	link.link_status = pdata->phy_link;
717 	link.link_speed = pdata->phy_speed;
718 	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
719 			      ETH_LINK_SPEED_FIXED);
720 	ret = rte_eth_linkstatus_set(dev, &link);
721 	if (ret == -1)
722 		PMD_DRV_LOG(ERR, "No change in link status\n");
723 
724 	return ret;
725 }
726 
727 static int
728 axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
729 {
730 	struct axgbe_port *pdata = dev->data->dev_private;
731 
732 	if (regs->data == NULL) {
733 		regs->length = axgbe_regs_get_count(pdata);
734 		regs->width = sizeof(uint32_t);
735 		return 0;
736 	}
737 
738 	/* Only full register dump is supported */
739 	if (regs->length &&
740 	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
741 		return -ENOTSUP;
742 
743 	regs->version = pdata->pci_dev->id.vendor_id << 16 |
744 			pdata->pci_dev->id.device_id;
745 	axgbe_regs_dump(pdata, regs->data);
746 	return 0;
747 }
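/*
 * This follows the usual two-call rte_eth_dev_get_reg_info() pattern: a
 * first call with data == NULL only reports the register count and width,
 * the caller then allocates the buffer and calls again for the full dump.
 * Illustrative sketch (error handling omitted):
 *
 *	struct rte_dev_reg_info info = { .data = NULL };
 *	rte_eth_dev_get_reg_info(port_id, &info);
 *	info.data = malloc(info.length * info.width);
 *	rte_eth_dev_get_reg_info(port_id, &info);
 */
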
748 static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
749 {
750 	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;
751 
752 	/* Freeze counters */
753 	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
754 
755 	/* Tx counters */
756 	stats->txoctetcount_gb +=
757 		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
758 	stats->txoctetcount_gb +=
759 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);
760 
761 	stats->txframecount_gb +=
762 		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
763 	stats->txframecount_gb +=
764 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);
765 
766 	stats->txbroadcastframes_g +=
767 		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
768 	stats->txbroadcastframes_g +=
769 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);
770 
771 	stats->txmulticastframes_g +=
772 		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
773 	stats->txmulticastframes_g +=
774 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);
775 
776 	stats->tx64octets_gb +=
777 		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
778 	stats->tx64octets_gb +=
779 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);
780 
781 	stats->tx65to127octets_gb +=
782 		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
783 	stats->tx65to127octets_gb +=
784 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);
785 
786 	stats->tx128to255octets_gb +=
787 		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
788 	stats->tx128to255octets_gb +=
789 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);
790 
791 	stats->tx256to511octets_gb +=
792 		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
793 	stats->tx256to511octets_gb +=
794 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);
795 
796 	stats->tx512to1023octets_gb +=
797 		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
798 	stats->tx512to1023octets_gb +=
799 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);
800 
801 	stats->tx1024tomaxoctets_gb +=
802 		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
803 	stats->tx1024tomaxoctets_gb +=
804 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);
805 
806 	stats->txunicastframes_gb +=
807 		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
808 	stats->txunicastframes_gb +=
809 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);
810 
811 	stats->txmulticastframes_gb +=
812 		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
813 	stats->txmulticastframes_gb +=
814 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);
815 
816 	stats->txbroadcastframes_g +=
817 		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
818 	stats->txbroadcastframes_g +=
819 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);
820 
821 	stats->txunderflowerror +=
822 		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
823 	stats->txunderflowerror +=
824 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);
825 
826 	stats->txoctetcount_g +=
827 		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
828 	stats->txoctetcount_g +=
829 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);
830 
831 	stats->txframecount_g +=
832 		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
833 	stats->txframecount_g +=
834 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);
835 
836 	stats->txpauseframes +=
837 		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
838 	stats->txpauseframes +=
839 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);
840 
841 	stats->txvlanframes_g +=
842 		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
843 	stats->txvlanframes_g +=
844 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);
845 
846 	/* Rx counters */
847 	stats->rxframecount_gb +=
848 		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
849 	stats->rxframecount_gb +=
850 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);
851 
852 	stats->rxoctetcount_gb +=
853 		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
854 	stats->rxoctetcount_gb +=
855 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);
856 
857 	stats->rxoctetcount_g +=
858 		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
859 	stats->rxoctetcount_g +=
860 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);
861 
862 	stats->rxbroadcastframes_g +=
863 		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
864 	stats->rxbroadcastframes_g +=
865 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);
866 
867 	stats->rxmulticastframes_g +=
868 		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
869 	stats->rxmulticastframes_g +=
870 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);
871 
872 	stats->rxcrcerror +=
873 		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
874 	stats->rxcrcerror +=
875 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);
876 
877 	stats->rxrunterror +=
878 		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
879 
880 	stats->rxjabbererror +=
881 		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
882 
883 	stats->rxundersize_g +=
884 		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
885 
886 	stats->rxoversize_g +=
887 		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
888 
889 	stats->rx64octets_gb +=
890 		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
891 	stats->rx64octets_gb +=
892 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);
893 
894 	stats->rx65to127octets_gb +=
895 		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
896 	stats->rx65to127octets_gb +=
897 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);
898 
899 	stats->rx128to255octets_gb +=
900 		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
901 	stats->rx128to255octets_gb +=
902 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);
903 
904 	stats->rx256to511octets_gb +=
905 		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
906 	stats->rx256to511octets_gb +=
907 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);
908 
909 	stats->rx512to1023octets_gb +=
910 		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
911 	stats->rx512to1023octets_gb +=
912 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);
913 
914 	stats->rx1024tomaxoctets_gb +=
915 		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
916 	stats->rx1024tomaxoctets_gb +=
917 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);
918 
919 	stats->rxunicastframes_g +=
920 		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
921 	stats->rxunicastframes_g +=
922 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);
923 
924 	stats->rxlengtherror +=
925 		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
926 	stats->rxlengtherror +=
927 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);
928 
929 	stats->rxoutofrangetype +=
930 		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
931 	stats->rxoutofrangetype +=
932 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);
933 
934 	stats->rxpauseframes +=
935 		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
936 	stats->rxpauseframes +=
937 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);
938 
939 	stats->rxfifooverflow +=
940 		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
941 	stats->rxfifooverflow +=
942 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);
943 
944 	stats->rxvlanframes_gb +=
945 		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
946 	stats->rxvlanframes_gb +=
947 	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);
948 
949 	stats->rxwatchdogerror +=
950 		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
951 
952 	/* Un-freeze counters */
953 	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
954 }
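/*
 * Each 64-bit MMC counter above is read as a LO/HI register pair and
 * reconstructed as LO + ((uint64_t)HI << 32); freezing the counters via
 * MMC_CR.MCF around the reads keeps the two halves consistent.
 */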
955 
956 static int
957 axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
958 		     unsigned int n)
959 {
960 	struct axgbe_port *pdata = dev->data->dev_private;
961 	unsigned int i;
962 
963 	if (!stats)
964 		return 0;
965 
966 	axgbe_read_mmc_stats(pdata);
967 
968 	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
969 		stats[i].id = i;
970 		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
971 				axgbe_xstats_strings[i].offset);
972 	}
973 
974 	return i;
975 }
976 
977 static int
978 axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
979 			   struct rte_eth_xstat_name *xstats_names,
980 			   unsigned int n)
981 {
982 	unsigned int i;
983 
984 	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
985 		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
986 			snprintf(xstats_names[i].name,
987 				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
988 				 axgbe_xstats_strings[i].name);
989 		}
990 	}
991 
992 	return AXGBE_XSTATS_COUNT;
993 }
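/*
 * Typical xstats discovery sketch: an application first calls
 * rte_eth_xstats_get_names(port_id, NULL, 0) to learn the count
 * (AXGBE_XSTATS_COUNT here), allocates arrays of that size and then
 * fetches names and values with rte_eth_xstats_get_names() and
 * rte_eth_xstats_get().
 */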
994 
995 static int
996 axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
997 			   uint64_t *values, unsigned int n)
998 {
999 	unsigned int i;
1000 	uint64_t values_copy[AXGBE_XSTATS_COUNT];
1001 
1002 	if (!ids) {
1003 		struct axgbe_port *pdata = dev->data->dev_private;
1004 
1005 		if (n < AXGBE_XSTATS_COUNT)
1006 			return AXGBE_XSTATS_COUNT;
1007 
1008 		axgbe_read_mmc_stats(pdata);
1009 
1010 		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
1011 			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
1012 					axgbe_xstats_strings[i].offset);
1013 		}
1014 
1015 		return i;
1016 	}
1017 
1018 	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);
1019 
1020 	for (i = 0; i < n; i++) {
1021 		if (ids[i] >= AXGBE_XSTATS_COUNT) {
1022 			PMD_DRV_LOG(ERR, "id value isn't valid\n");
1023 			return -1;
1024 		}
1025 		values[i] = values_copy[ids[i]];
1026 	}
1027 	return n;
1028 }
1029 
1030 static int
1031 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1032 				 struct rte_eth_xstat_name *xstats_names,
1033 				 const uint64_t *ids,
1034 				 unsigned int size)
1035 {
1036 	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
1037 	unsigned int i;
1038 
1039 	if (!ids)
1040 		return axgbe_dev_xstats_get_names(dev, xstats_names, size);
1041 
1042 	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);
1043 
1044 	for (i = 0; i < size; i++) {
1045 		if (ids[i] >= AXGBE_XSTATS_COUNT) {
1046 			PMD_DRV_LOG(ERR, "id value isn't valid\n");
1047 			return -1;
1048 		}
1049 		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1050 	}
1051 	return size;
1052 }
1053 
1054 static int
1055 axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
1056 {
1057 	struct axgbe_port *pdata = dev->data->dev_private;
1058 	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;
1059 
1060 	/* MMC registers are configured for reset on read */
1061 	axgbe_read_mmc_stats(pdata);
1062 
1063 	/* Reset stats */
1064 	memset(stats, 0, sizeof(*stats));
1065 
1066 	return 0;
1067 }
1068 
1069 static int
1070 axgbe_dev_stats_get(struct rte_eth_dev *dev,
1071 		    struct rte_eth_stats *stats)
1072 {
1073 	struct axgbe_rx_queue *rxq;
1074 	struct axgbe_tx_queue *txq;
1075 	struct axgbe_port *pdata = dev->data->dev_private;
1076 	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
1077 	unsigned int i;
1078 
1079 	axgbe_read_mmc_stats(pdata);
1080 
1081 	stats->imissed = mmc_stats->rxfifooverflow;
1082 
1083 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1084 		rxq = dev->data->rx_queues[i];
1085 		stats->q_ipackets[i] = rxq->pkts;
1086 		stats->ipackets += rxq->pkts;
1087 		stats->q_ibytes[i] = rxq->bytes;
1088 		stats->ibytes += rxq->bytes;
1089 		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
1090 		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
1091 		stats->ierrors += rxq->errors;
1092 	}
1093 
1094 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1095 		txq = dev->data->tx_queues[i];
1096 		stats->q_opackets[i] = txq->pkts;
1097 		stats->opackets += txq->pkts;
1098 		stats->q_obytes[i] = txq->bytes;
1099 		stats->obytes += txq->bytes;
1100 		stats->oerrors += txq->errors;
1101 	}
1102 
1103 	return 0;
1104 }
1105 
1106 static int
1107 axgbe_dev_stats_reset(struct rte_eth_dev *dev)
1108 {
1109 	struct axgbe_rx_queue *rxq;
1110 	struct axgbe_tx_queue *txq;
1111 	unsigned int i;
1112 
1113 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1114 		rxq = dev->data->rx_queues[i];
1115 		rxq->pkts = 0;
1116 		rxq->bytes = 0;
1117 		rxq->errors = 0;
1118 		rxq->rx_mbuf_alloc_failed = 0;
1119 	}
1120 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1121 		txq = dev->data->tx_queues[i];
1122 		txq->pkts = 0;
1123 		txq->bytes = 0;
1124 		txq->errors = 0;
1125 	}
1126 
1127 	return 0;
1128 }
1129 
1130 static int
1131 axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1132 {
1133 	struct axgbe_port *pdata = dev->data->dev_private;
1134 
1135 	dev_info->max_rx_queues = pdata->rx_ring_count;
1136 	dev_info->max_tx_queues = pdata->tx_ring_count;
1137 	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
1138 	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
1139 	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
1140 	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
1141 	dev_info->speed_capa =  ETH_LINK_SPEED_10G;
1142 
1143 	dev_info->rx_offload_capa =
1144 		DEV_RX_OFFLOAD_IPV4_CKSUM |
1145 		DEV_RX_OFFLOAD_UDP_CKSUM  |
1146 		DEV_RX_OFFLOAD_TCP_CKSUM  |
1147 		DEV_RX_OFFLOAD_JUMBO_FRAME	|
1148 		DEV_RX_OFFLOAD_SCATTER	  |
1149 		DEV_RX_OFFLOAD_KEEP_CRC;
1150 
1151 	dev_info->tx_offload_capa =
1152 		DEV_TX_OFFLOAD_IPV4_CKSUM  |
1153 		DEV_TX_OFFLOAD_UDP_CKSUM   |
1154 		DEV_TX_OFFLOAD_TCP_CKSUM;
1155 
1156 	if (pdata->hw_feat.rss) {
1157 		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
1158 		dev_info->reta_size = pdata->hw_feat.hash_table_size;
1159 		dev_info->hash_key_size =  AXGBE_RSS_HASH_KEY_SIZE;
1160 	}
1161 
1162 	dev_info->rx_desc_lim = rx_desc_lim;
1163 	dev_info->tx_desc_lim = tx_desc_lim;
1164 
1165 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1166 		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
1167 	};
1168 
1169 	dev_info->default_txconf = (struct rte_eth_txconf) {
1170 		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
1171 	};
1172 
1173 	return 0;
1174 }
1175 
1176 static int
1177 axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1178 {
1179 	struct axgbe_port *pdata = dev->data->dev_private;
1180 	struct xgbe_fc_info fc = pdata->fc;
1181 	unsigned int reg, reg_val = 0;
1182 
1183 	reg = MAC_Q0TFCR;
1184 	reg_val = AXGMAC_IOREAD(pdata, reg);
1185 	fc.low_water[0] =  AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
1186 	fc.high_water[0] =  AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
1187 	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
1188 	fc.autoneg = pdata->pause_autoneg;
1189 
1190 	if (pdata->rx_pause && pdata->tx_pause)
1191 		fc.mode = RTE_FC_FULL;
1192 	else if (pdata->rx_pause)
1193 		fc.mode = RTE_FC_RX_PAUSE;
1194 	else if (pdata->tx_pause)
1195 		fc.mode = RTE_FC_TX_PAUSE;
1196 	else
1197 		fc.mode = RTE_FC_NONE;
1198 
1199 	fc_conf->high_water =  (1024 + (fc.low_water[0] << 9)) / 1024;
1200 	fc_conf->low_water =  (1024 + (fc.high_water[0] << 9)) / 1024;
1201 	fc_conf->pause_time = fc.pause_time[0];
1202 	fc_conf->send_xon = fc.send_xon;
1203 	fc_conf->mode = fc.mode;
1204 
1205 	return 0;
1206 }
1207 
1208 static int
1209 axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1210 {
1211 	struct axgbe_port *pdata = dev->data->dev_private;
1212 	struct xgbe_fc_info fc = pdata->fc;
1213 	unsigned int reg, reg_val = 0;
1214 	reg = MAC_Q0TFCR;
1215 
1216 	pdata->pause_autoneg = fc_conf->autoneg;
1217 	pdata->phy.pause_autoneg = pdata->pause_autoneg;
1218 	fc.send_xon = fc_conf->send_xon;
1219 	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
1220 			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
1221 	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
1222 			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
1223 	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
1224 	AXGMAC_IOWRITE(pdata, reg, reg_val);
1225 	fc.mode = fc_conf->mode;
1226 
1227 	if (fc.mode == RTE_FC_FULL) {
1228 		pdata->tx_pause = 1;
1229 		pdata->rx_pause = 1;
1230 	} else if (fc.mode == RTE_FC_RX_PAUSE) {
1231 		pdata->tx_pause = 0;
1232 		pdata->rx_pause = 1;
1233 	} else if (fc.mode == RTE_FC_TX_PAUSE) {
1234 		pdata->tx_pause = 1;
1235 		pdata->rx_pause = 0;
1236 	} else {
1237 		pdata->tx_pause = 0;
1238 		pdata->rx_pause = 0;
1239 	}
1240 
1241 	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
1242 		pdata->hw_if.config_tx_flow_control(pdata);
1243 
1244 	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
1245 		pdata->hw_if.config_rx_flow_control(pdata);
1246 
1247 	pdata->hw_if.config_flow_control(pdata);
1248 	pdata->phy.tx_pause = pdata->tx_pause;
1249 	pdata->phy.rx_pause = pdata->rx_pause;
1250 
1251 	return 0;
1252 }
1253 
1254 static int
1255 axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
1256 		struct rte_eth_pfc_conf *pfc_conf)
1257 {
1258 	struct axgbe_port *pdata = dev->data->dev_private;
1259 	struct xgbe_fc_info fc = pdata->fc;
1260 	uint8_t tc_num;
1261 
1262 	tc_num = pdata->pfc_map[pfc_conf->priority];
1263 
1264 	if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
1265 		PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n",
1266 				pdata->hw_feat.tc_cnt);
1267 		return -EINVAL;
1268 	}
1269 
1270 	pdata->pause_autoneg = pfc_conf->fc.autoneg;
1271 	pdata->phy.pause_autoneg = pdata->pause_autoneg;
1272 	fc.send_xon = pfc_conf->fc.send_xon;
1273 	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
1274 		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
1275 	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
1276 		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));
1277 
1278 	switch (tc_num) {
1279 	case 0:
1280 		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1281 				PSTC0, pfc_conf->fc.pause_time);
1282 		break;
1283 	case 1:
1284 		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1285 				PSTC1, pfc_conf->fc.pause_time);
1286 		break;
1287 	case 2:
1288 		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1289 				PSTC2, pfc_conf->fc.pause_time);
1290 		break;
1291 	case 3:
1292 		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1293 				PSTC3, pfc_conf->fc.pause_time);
1294 		break;
1295 	case 4:
1296 		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1297 				PSTC4, pfc_conf->fc.pause_time);
1298 		break;
1299 	case 5:
1300 		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1301 				PSTC5, pfc_conf->fc.pause_time);
1302 		break;
1303 	case 7:
1304 		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1305 				PSTC6, pfc_conf->fc.pause_time);
1306 		break;
1307 	case 6:
1308 		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1309 				PSTC7, pfc_conf->fc.pause_time);
1310 		break;
1311 	}
1312 
1313 	fc.mode = pfc_conf->fc.mode;
1314 
1315 	if (fc.mode == RTE_FC_FULL) {
1316 		pdata->tx_pause = 1;
1317 		pdata->rx_pause = 1;
1318 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
1319 	} else if (fc.mode == RTE_FC_RX_PAUSE) {
1320 		pdata->tx_pause = 0;
1321 		pdata->rx_pause = 1;
1322 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
1323 	} else if (fc.mode == RTE_FC_TX_PAUSE) {
1324 		pdata->tx_pause = 1;
1325 		pdata->rx_pause = 0;
1326 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
1327 	} else {
1328 		pdata->tx_pause = 0;
1329 		pdata->rx_pause = 0;
1330 		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
1331 	}
1332 
1333 	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
1334 		pdata->hw_if.config_tx_flow_control(pdata);
1335 
1336 	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
1337 		pdata->hw_if.config_rx_flow_control(pdata);
1338 	pdata->hw_if.config_flow_control(pdata);
1339 	pdata->phy.tx_pause = pdata->tx_pause;
1340 	pdata->phy.rx_pause = pdata->rx_pause;
1341 
1342 	return 0;
1343 }
1344 
1345 void
1346 axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1347 	struct rte_eth_rxq_info *qinfo)
1348 {
1349 	struct   axgbe_rx_queue *rxq;
1350 
1351 	rxq = dev->data->rx_queues[queue_id];
1352 	qinfo->mp = rxq->mb_pool;
1353 	qinfo->scattered_rx = dev->data->scattered_rx;
1354 	qinfo->nb_desc = rxq->nb_desc;
1355 	qinfo->conf.rx_free_thresh = rxq->free_thresh;
1356 }
1357 
1358 void
1359 axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1360 	struct rte_eth_txq_info *qinfo)
1361 {
1362 	struct  axgbe_tx_queue *txq;
1363 
1364 	txq = dev->data->tx_queues[queue_id];
1365 	qinfo->nb_desc = txq->nb_desc;
1366 	qinfo->conf.tx_free_thresh = txq->free_thresh;
1367 }
1368 const uint32_t *
1369 axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1370 {
1371 	static const uint32_t ptypes[] = {
1372 		RTE_PTYPE_L2_ETHER,
1373 		RTE_PTYPE_L2_ETHER_TIMESYNC,
1374 		RTE_PTYPE_L2_ETHER_LLDP,
1375 		RTE_PTYPE_L2_ETHER_ARP,
1376 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1377 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1378 		RTE_PTYPE_L4_FRAG,
1379 		RTE_PTYPE_L4_ICMP,
1380 		RTE_PTYPE_L4_NONFRAG,
1381 		RTE_PTYPE_L4_SCTP,
1382 		RTE_PTYPE_L4_TCP,
1383 		RTE_PTYPE_L4_UDP,
1384 		RTE_PTYPE_TUNNEL_GRENAT,
1385 		RTE_PTYPE_TUNNEL_IP,
1386 		RTE_PTYPE_INNER_L2_ETHER,
1387 		RTE_PTYPE_INNER_L2_ETHER_VLAN,
1388 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1389 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1390 		RTE_PTYPE_INNER_L4_FRAG,
1391 		RTE_PTYPE_INNER_L4_ICMP,
1392 		RTE_PTYPE_INNER_L4_NONFRAG,
1393 		RTE_PTYPE_INNER_L4_SCTP,
1394 		RTE_PTYPE_INNER_L4_TCP,
1395 		RTE_PTYPE_INNER_L4_UDP,
1396 		RTE_PTYPE_UNKNOWN
1397 	};
1398 
1399 	if (dev->rx_pkt_burst == axgbe_recv_pkts)
1400 		return ptypes;
1401 	return NULL;
1402 }
1403 static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1404 {
1405 	struct rte_eth_dev_info dev_info;
1406 	struct axgbe_port *pdata = dev->data->dev_private;
1407 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1408 	unsigned int val = 0;
1409 	axgbe_dev_info_get(dev, &dev_info);
1410 	/* check that mtu is within the allowed range */
1411 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1412 		return -EINVAL;
1413 	/* MTU setting is forbidden if the port is started */
1414 	if (dev->data->dev_started) {
1415 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1416 				dev->data->port_id);
1417 		return -EBUSY;
1418 	}
1419 	if (frame_size > RTE_ETHER_MAX_LEN) {
1420 		dev->data->dev_conf.rxmode.offloads |=
1421 			DEV_RX_OFFLOAD_JUMBO_FRAME;
1422 		val = 1;
1423 	} else {
1424 		dev->data->dev_conf.rxmode.offloads &=
1425 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
1426 		val = 0;
1427 	}
1428 	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
1429 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1430 	return 0;
1431 }
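/*
 * Worked example for the frame size check above: with RTE_ETHER_HDR_LEN (14)
 * and RTE_ETHER_CRC_LEN (4), an MTU of 1500 gives a 1518-byte frame, equal to
 * RTE_ETHER_MAX_LEN, so jumbo mode stays off (JE = 0); an MTU of 9000 gives
 * 9018 bytes, which turns on DEV_RX_OFFLOAD_JUMBO_FRAME and JE = 1.
 */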
1432 static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
1433 {
1434 	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
1435 	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
1436 
1437 	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
1438 	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
1439 	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
1440 
1441 	memset(hw_feat, 0, sizeof(*hw_feat));
1442 
1443 	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);
1444 
1445 	/* Hardware feature register 0 */
1446 	hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
1447 	hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
1448 	hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
1449 	hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
1450 	hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
1451 	hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
1452 	hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
1453 	hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
1454 	hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
1455 	hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
1456 	hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
1457 	hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
1458 					      ADDMACADRSEL);
1459 	hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
1460 	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
1461 
1462 	/* Hardware feature register 1 */
1463 	hw_feat->rx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
1464 						RXFIFOSIZE);
1465 	hw_feat->tx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
1466 						TXFIFOSIZE);
1467 	hw_feat->adv_ts_hi     = AXGMAC_GET_BITS(mac_hfr1,
1468 						 MAC_HWF1R, ADVTHWORD);
1469 	hw_feat->dma_width     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
1470 	hw_feat->dcb           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
1471 	hw_feat->sph           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
1472 	hw_feat->tso           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
1473 	hw_feat->dma_debug     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
1474 	hw_feat->rss           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
1475 	hw_feat->tc_cnt	       = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
1476 	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
1477 						  HASHTBLSZ);
1478 	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
1479 						  L3L4FNUM);
1480 
1481 	/* Hardware feature register 2 */
1482 	hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
1483 	hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
1484 	hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
1485 	hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
1486 	hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
1487 	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
1488 						AUXSNAPNUM);
1489 
1490 	/* Translate the Hash Table size into actual number */
1491 	switch (hw_feat->hash_table_size) {
1492 	case 0:
1493 		break;
1494 	case 1:
1495 		hw_feat->hash_table_size = 64;
1496 		break;
1497 	case 2:
1498 		hw_feat->hash_table_size = 128;
1499 		break;
1500 	case 3:
1501 		hw_feat->hash_table_size = 256;
1502 		break;
1503 	}
1504 
1505 	/* Translate the address width setting into actual number */
1506 	switch (hw_feat->dma_width) {
1507 	case 0:
1508 		hw_feat->dma_width = 32;
1509 		break;
1510 	case 1:
1511 		hw_feat->dma_width = 40;
1512 		break;
1513 	case 2:
1514 		hw_feat->dma_width = 48;
1515 		break;
1516 	default:
1517 		hw_feat->dma_width = 32;
1518 	}
1519 
1520 	/* The Queue, Channel and TC counts are zero based so increment them
1521 	 * to get the actual number
1522 	 */
1523 	hw_feat->rx_q_cnt++;
1524 	hw_feat->tx_q_cnt++;
1525 	hw_feat->rx_ch_cnt++;
1526 	hw_feat->tx_ch_cnt++;
1527 	hw_feat->tc_cnt++;
1528 
1529 	/* Translate the fifo sizes into actual numbers */
1530 	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
1531 	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
1532 }
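/*
 * Example of the FIFO size translation above: the raw field value n encodes
 * 1 << (n + 7) bytes, i.e. 128 bytes times 2^n, so a value of 7 corresponds
 * to a 16 KB FIFO.
 */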
1533 
1534 static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
1535 {
1536 	axgbe_init_function_ptrs_dev(&pdata->hw_if);
1537 	axgbe_init_function_ptrs_phy(&pdata->phy_if);
1538 	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
1539 	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
1540 }
1541 
1542 static void axgbe_set_counts(struct axgbe_port *pdata)
1543 {
1544 	/* Set all the function pointers */
1545 	axgbe_init_all_fptrs(pdata);
1546 
1547 	/* Populate the hardware features */
1548 	axgbe_get_all_hw_features(pdata);
1549 
1550 	/* Set default max values if not provided */
1551 	if (!pdata->tx_max_channel_count)
1552 		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
1553 	if (!pdata->rx_max_channel_count)
1554 		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
1555 
1556 	if (!pdata->tx_max_q_count)
1557 		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
1558 	if (!pdata->rx_max_q_count)
1559 		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
1560 
1561 	/* Calculate the number of Tx and Rx rings to be created
1562 	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
1563 	 *   the number of Tx queues to the number of Tx channels
1564 	 *   enabled
1565 	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
1566 	 *   number of Rx queues or maximum allowed
1567 	 */
1568 	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
1569 				     pdata->tx_max_channel_count);
1570 	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
1571 				     pdata->tx_max_q_count);
1572 
1573 	pdata->tx_q_count = pdata->tx_ring_count;
1574 
1575 	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
1576 				     pdata->rx_max_channel_count);
1577 
1578 	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
1579 				  pdata->rx_max_q_count);
1580 }
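/*
 * Example with assumed hardware values: tx_ch_cnt = 8, tx_max_channel_count
 * = 4 and tx_max_q_count = 8 give tx_ring_count = min(8, 4) = 4, further
 * clamped by tx_max_q_count to 4; the Rx ring and queue counts are clamped
 * independently against the Rx limits.
 */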
1581 
1582 static void axgbe_default_config(struct axgbe_port *pdata)
1583 {
1584 	pdata->pblx8 = DMA_PBL_X8_ENABLE;
1585 	pdata->tx_sf_mode = MTL_TSF_ENABLE;
1586 	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
1587 	pdata->tx_pbl = DMA_PBL_32;
1588 	pdata->tx_osp_mode = DMA_OSP_ENABLE;
1589 	pdata->rx_sf_mode = MTL_RSF_ENABLE;
1590 	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
1591 	pdata->rx_pbl = DMA_PBL_32;
1592 	pdata->pause_autoneg = 1;
1593 	pdata->tx_pause = 0;
1594 	pdata->rx_pause = 0;
1595 	pdata->phy_speed = SPEED_UNKNOWN;
1596 	pdata->power_down = 0;
1597 }
1598 
1599 static int
1600 pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
1601 {
1602 	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
1603 	const struct rte_pci_id *pcid = _pci_id;
1604 
1605 	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
1606 			pdev->id.device_id == pcid->device_id)
1607 		return 0;
1608 	return 1;
1609 }
1610 
1611 static bool
1612 pci_search_device(int device_id)
1613 {
1614 	struct rte_bus *pci_bus;
1615 	struct rte_pci_id dev_id;
1616 
1617 	dev_id.device_id = device_id;
1618 	pci_bus = rte_bus_find_by_name("pci");
1619 	return (pci_bus != NULL) &&
1620 		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
1621 }
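/*
 * pci_search_device() scans the PCI bus for the given device ID; it is used
 * in eth_axgbe_dev_init() below to detect the RV root complex
 * (AMD_PCI_RV_ROOT_COMPLEX_ID) and pick the matching XPCS window
 * definition/select registers.
 */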
1622 
1623 /*
1624  * It returns 0 on success.
1625  */
1626 static int
1627 eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
1628 {
1629 	PMD_INIT_FUNC_TRACE();
1630 	struct axgbe_port *pdata;
1631 	struct rte_pci_device *pci_dev;
1632 	uint32_t reg, mac_lo, mac_hi;
1633 	uint32_t len;
1634 	int ret;
1635 
1636 	eth_dev->dev_ops = &axgbe_eth_dev_ops;
1637 
1638 	/*
1639 	 * For secondary processes, we don't initialise any further as primary
1640 	 * has already done this work.
1641 	 */
1642 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1643 		return 0;
1644 
1645 	pdata = eth_dev->data->dev_private;
1646 	/* initial state */
1647 	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
1648 	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
1649 	pdata->eth_dev = eth_dev;
1650 
1651 	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1652 	pdata->pci_dev = pci_dev;
1653 
1654 	/*
1655 	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
1656 	 */
1657 	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
1658 		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
1659 		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
1660 	} else {
1661 		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
1662 		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
1663 	}
1664 
1665 	pdata->xgmac_regs =
1666 		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
1667 	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
1668 				     + AXGBE_MAC_PROP_OFFSET);
1669 	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
1670 				    + AXGBE_I2C_CTRL_OFFSET);
1671 	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
1672 
1673 	/* Version-specific driver data */
1674 	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
1675 		pdata->vdata = &axgbe_v2a;
1676 	else
1677 		pdata->vdata = &axgbe_v2b;
1678 
1679 	/* Configure the PCS indirect addressing support */
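	/* The DEF register encodes the window offset in 64-byte units and the
	 * window size as a power of two (128 << SIZE bytes); the size - 1 mask
	 * is used to isolate the in-window part of an indirect XPCS address.
	 */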
1680 	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
1681 	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
1682 	pdata->xpcs_window <<= 6;
1683 	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
1684 	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
1685 	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
1686 
1687 	PMD_INIT_LOG(DEBUG,
		     "xpcs window: %x, size: %x, mask: %x", pdata->xpcs_window,
1689 		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
1690 	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
1691 
1692 	/* Retrieve the MAC address */
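	/* The octets are packed little-endian across the LO/HI property words. */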
1693 	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
1694 	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
1695 	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
1696 	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
1697 	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
1698 	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
1699 	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
1700 	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
1701 
1702 	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
1703 	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);
1704 
1705 	if (!eth_dev->data->mac_addrs) {
1706 		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to "
1708 			     "store MAC addresses", len);
1709 		return -ENOMEM;
1710 	}
1711 
1712 	/* Allocate memory for storing hash filter MAC addresses */
1713 	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
1714 	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
1715 						    len, 0);
1716 
1717 	if (eth_dev->data->hash_mac_addrs == NULL) {
1718 		PMD_INIT_LOG(ERR,
1719 			     "Failed to allocate %u bytes needed to "
1720 			     "store MAC addresses", len);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
1721 		return -ENOMEM;
1722 	}
1723 
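	/* Fall back to a random, locally administered MAC address if the
	 * hardware did not provide a valid one.
	 */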
1724 	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
1725 		rte_eth_random_addr(pdata->mac_addr.addr_bytes);
1726 
1727 	/* Copy the permanent MAC address */
1728 	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
1729 
1730 	/* Clock settings */
1731 	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
1732 	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
1733 
1734 	/* Set the DMA coherency values */
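	/* Treat the device as DMA coherent and use the outer-shareable AXI
	 * domain with cacheable read/write attributes.
	 */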
1735 	pdata->coherent = 1;
1736 	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
1737 	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
1738 	pdata->awcache = AXGBE_DMA_OS_AWCACHE;
1739 
1740 	/* Set the maximum channels and queues */
1741 	reg = XP_IOREAD(pdata, XP_PROP_1);
1742 	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
1743 	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
1744 	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
1745 	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
1746 
1747 	/* Set the hardware channel and queue counts */
1748 	axgbe_set_counts(pdata);
1749 
1750 	/* Set the maximum FIFO sizes */
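	/* XP_PROP_2 reports the FIFO depths in units of 16 KB; they are clamped
	 * to the per-version limits carried in the driver data.
	 */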
1751 	reg = XP_IOREAD(pdata, XP_PROP_2);
1752 	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
1753 	pdata->tx_max_fifo_size *= 16384;
1754 	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
1755 					  pdata->vdata->tx_max_fifo_size);
1756 	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
1757 	pdata->rx_max_fifo_size *= 16384;
1758 	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
1759 					  pdata->vdata->rx_max_fifo_size);
1760 	/* Issue software reset to DMA */
1761 	ret = pdata->hw_if.exit(pdata);
1762 	if (ret)
1763 		PMD_DRV_LOG(ERR, "DMA software reset (hw_if->exit) failed: %d\n", ret);
1764 
1765 	/* Set default configuration data */
1766 	axgbe_default_config(pdata);
1767 
1768 	/* Set default max values if not provided */
1769 	if (!pdata->tx_max_fifo_size)
1770 		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
1771 	if (!pdata->rx_max_fifo_size)
1772 		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
1773 
1774 	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
1775 	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
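	/* Serialise access to the XPCS window, the I2C master, and the
	 * autonegotiation and PHY state.
	 */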
1776 	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
1777 	pthread_mutex_init(&pdata->i2c_mutex, NULL);
1778 	pthread_mutex_init(&pdata->an_mutex, NULL);
1779 	pthread_mutex_init(&pdata->phy_mutex, NULL);
1780 
1781 	ret = pdata->phy_if.phy_init(pdata);
1782 	if (ret) {
1783 		rte_free(eth_dev->data->mac_addrs);
1784 		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
1785 		return ret;
1786 	}
1787 
1788 	rte_intr_callback_register(&pci_dev->intr_handle,
1789 				   axgbe_dev_interrupt_handler,
1790 				   (void *)eth_dev);
1791 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1792 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
1793 		     pci_dev->id.device_id);
1794 
1795 	return 0;
1796 }
1797 
1798 static int
1799 eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1800 {
1801 	struct rte_pci_device *pci_dev;
1802 
1803 	PMD_INIT_FUNC_TRACE();
1804 
1805 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1806 		return 0;
1807 
1808 	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1809 	eth_dev->dev_ops = NULL;
1810 	eth_dev->rx_pkt_burst = NULL;
1811 	eth_dev->tx_pkt_burst = NULL;
1812 	axgbe_dev_clear_queues(eth_dev);
1813 
1814 	/* disable uio intr before callback unregister */
1815 	rte_intr_disable(&pci_dev->intr_handle);
1816 	rte_intr_callback_unregister(&pci_dev->intr_handle,
1817 				     axgbe_dev_interrupt_handler,
1818 				     (void *)eth_dev);
1819 
1820 	return 0;
1821 }
1822 
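/*
 * Probe/remove glue: allocate an ethdev with an axgbe_port private data area
 * and drive init/uninit through the generic PCI ethdev helpers.
 */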
1823 static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1824 	struct rte_pci_device *pci_dev)
1825 {
1826 	return rte_eth_dev_pci_generic_probe(pci_dev,
1827 		sizeof(struct axgbe_port), eth_axgbe_dev_init);
1828 }
1829 
1830 static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
1831 {
1832 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
1833 }
1834 
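/* PCI driver definition; RTE_PCI_DRV_NEED_MAPPING asks the EAL to map the
 * device BARs before probe is invoked.
 */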
1835 static struct rte_pci_driver rte_axgbe_pmd = {
1836 	.id_table = pci_id_axgbe_map,
1837 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1838 	.probe = eth_axgbe_pci_probe,
1839 	.remove = eth_axgbe_pci_remove,
1840 };
1841 
1842 RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
1843 RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
1844 RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
1845 
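/* Constructor: register the init and driver log types and default them to
 * NOTICE so the DEBUG-level traces stay silent unless explicitly raised.
 */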
1846 RTE_INIT(axgbe_init_log)
1847 {
1848 	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
1849 	if (axgbe_logtype_init >= 0)
1850 		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
1851 	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
1852 	if (axgbe_logtype_driver >= 0)
1853 		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
1854 }
1855