/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"
#include "rte_time.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int  axgbe_dev_configure(struct rte_eth_dev *dev);
static int  axgbe_dev_start(struct rte_eth_dev *dev);
static int  axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static int axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_reset(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr,
				  uint32_t index,
				  uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
				       struct rte_ether_addr *mac_addr,
				       uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
					   uint8_t add);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
			      struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats,
				unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
			   const uint64_t *ids,
			   uint64_t *values,
			   unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size);
static int axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size);
static int axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf);
static int axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf);
static int  axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);
static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_pfc_conf *pfc_conf);
static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo);
static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo);
const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int
axgbe_timesync_enable(struct rte_eth_dev *dev);
static int
axgbe_timesync_disable(struct rte_eth_dev *dev);
static int
axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
			struct timespec *timestamp, uint32_t flags);
static int
axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
			struct timespec *timestamp);
static int
axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int
axgbe_timesync_read_time(struct rte_eth_dev *dev,
			struct timespec *timestamp);
static int
axgbe_timesync_write_time(struct rte_eth_dev *dev,
			const struct timespec *timestamp);
static void
axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
			unsigned int nsec);
static void
axgbe_update_tstamp_addend(struct axgbe_port *pdata,
			unsigned int addend);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var)                           \
	{ _string,                                              \
	  offsetof(struct axgbe_mmc_stats, _var),       \
	}

static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT        ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID       0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.dev_reset            = axgbe_dev_reset,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set         = axgbe_dev_mac_addr_set,
	.mac_addr_add         = axgbe_dev_mac_addr_add,
	.mac_addr_remove      = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list     = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set    = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update          = axgbe_dev_link_update,
	.get_reg	      = axgbe_dev_get_regs,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.xstats_get	      = axgbe_dev_xstats_get,
	.xstats_reset	      = axgbe_dev_xstats_reset,
	.xstats_get_names     = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
	.reta_update          = axgbe_dev_rss_reta_update,
	.reta_query           = axgbe_dev_rss_reta_query,
	.rss_hash_update      = axgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = axgbe_dev_rss_hash_conf_get,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
	.flow_ctrl_get        = axgbe_flow_ctrl_get,
	.flow_ctrl_set        = axgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
	.rxq_info_get                 = axgbe_rxq_info_get,
	.txq_info_get                 = axgbe_txq_info_get,
	.dev_supported_ptypes_get     = axgbe_dev_supported_ptypes_get,
	.mtu_set		= axgb_mtu_set,
	.timesync_enable              = axgbe_timesync_enable,
	.timesync_disable             = axgbe_timesync_disable,
	.timesync_read_rx_timestamp   = axgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = axgbe_timesync_read_tx_timestamp,
	.timesync_adjust_time         = axgbe_timesync_adjust_time,
	.timesync_read_time           = axgbe_timesync_read_time,
	.timesync_write_time          = axgbe_timesync_write_time,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by the NIC for handling
 * a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure the device.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}
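
/*
 * Illustrative usage (not part of the driver): how an application would
 * enable the Rx checksum offload that axgbe_dev_configure() latches into
 * pdata->rx_csum_enable. The queue counts are placeholders.
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 */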

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}
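
/*
 * Illustrative usage (not part of the driver): selecting RSS at configure
 * time so that axgbe_dev_rx_mq_config() sets pdata->rss_enable. Any
 * mq_mode other than ETH_MQ_RX_RSS or ETH_MQ_RX_NONE is rejected above.
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *	port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */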

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
				max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static int
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (rte_bit_relaxed_get32(AXGBE_STOPPED, &pdata->dev_state))
		return 0;

	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);

	return 0;
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Set Default MAC Addr */
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
			      uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}

static int
axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i, idx, shift;
	int ret;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
			continue;
		pdata->rss_table[i] = reta_conf[idx].reta[shift];
	}

	/* Program the lookup table */
	ret = axgbe_write_rss_lookup_table(pdata);
	return ret;
}
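
/*
 * Illustrative usage (not part of the driver): spreading the first 64
 * RETA entries across two queues through the generic ethdev call that
 * lands in axgbe_dev_rss_reta_update(). Queue and entry counts are
 * placeholders.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[1];
 *	uint16_t i;
 *
 *	reta_conf[0].mask = UINT64_MAX;
 *	for (i = 0; i < RTE_RETA_GROUP_SIZE; i++)
 *		reta_conf[0].reta[i] = i % 2;
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, RTE_RETA_GROUP_SIZE);
 */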

static int
axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i, idx, shift;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
		PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
			continue;
		reta_conf[idx].reta[shift] = pdata->rss_table[i];
	}
	return 0;
}

static int
axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (rss_conf == NULL) {
		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
		return -EINVAL;
	}

	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len == AXGBE_RSS_HASH_KEY_SIZE) {
		rte_memcpy(pdata->rss_key, rss_conf->rss_key,
		       AXGBE_RSS_HASH_KEY_SIZE);
		/* Program the hash key */
		ret = axgbe_write_rss_hash_key(pdata);
		if (ret != 0)
			return ret;
	}

	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;

	if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	if (pdata->rss_hf &
	    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	if (pdata->rss_hf &
	    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Set the RSS options */
	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	return 0;
}

static int
axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (rss_conf == NULL) {
		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
		return -EINVAL;
	}

	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len >= AXGBE_RSS_HASH_KEY_SIZE) {
		rte_memcpy(rss_conf->rss_key, pdata->rss_key,
		       AXGBE_RSS_HASH_KEY_SIZE);
	}
	rss_conf->rss_key_len = AXGBE_RSS_HASH_KEY_SIZE;
	rss_conf->rss_hf = pdata->rss_hf;
	return 0;
}

static int
axgbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret = 0;

	ret = axgbe_dev_close(dev);
	if (ret)
		return ret;

	ret = eth_axgbe_dev_init(dev);

	return ret;
}

static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}

static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1; /* 0 is always default mac */
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
		return -EINVAL;
	}

	/* clear unicast addresses */
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}

static int
axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);

	if (pdata->uc_hash_mac_addr > 0) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

static int
axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	for (index = 0; index < pdata->hash_table_count; index++) {
		if (add)
			pdata->uc_hash_table[index] = ~0;
		else
			pdata->uc_hash_table[index] = 0;

		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
			    add ? "set" : "clear", index);

		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
			       pdata->uc_hash_table[index]);
	}

	if (add) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}
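
/*
 * Illustrative usage (not part of the driver): the two-call pattern that
 * axgbe_dev_get_regs() implements. Passing a NULL data pointer queries
 * the dump size; the second call fetches the full register dump.
 *
 *	struct rte_dev_reg_info info = { 0 };
 *
 *	rte_eth_dev_get_reg_info(port_id, &info);
 *	info.data = calloc(info.length, info.width);
 *	if (info.data != NULL)
 *		rte_eth_dev_get_reg_info(port_id, &info);
 */
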
static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}
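
/*
 * Each 64-bit MMC counter above is assembled from a 32-bit register pair:
 * the _LO half is read first, then the _HI half is shifted into the upper
 * 32 bits. A minimal sketch of the idiom used throughout this function:
 *
 *	uint64_t cnt;
 *
 *	cnt = AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
 *	cnt |= (uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32;
 */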

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}

static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}

static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}
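
/*
 * Illustrative usage (not part of the driver): fetching one extended
 * statistic through the by-id path above. The xstat name is taken from
 * axgbe_xstats_strings[]; the variables are placeholders.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_crc_errors", &id) == 0)
 *		rte_eth_xstats_get_by_id(port_id, &id, &value, 1);
 */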

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}

static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq) {
			stats->q_ipackets[i] = rxq->pkts;
			stats->ipackets += rxq->pkts;
			stats->q_ibytes[i] = rxq->bytes;
			stats->ibytes += rxq->bytes;
			stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
			stats->q_errors[i] = rxq->errors
				+ rxq->rx_mbuf_alloc_failed;
			stats->ierrors += rxq->errors;
		} else {
			PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
					dev->data->port_id);
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq) {
			stats->q_opackets[i] = txq->pkts;
			stats->opackets += txq->pkts;
			stats->q_obytes[i] = txq->bytes;
			stats->obytes += txq->bytes;
			stats->oerrors += txq->errors;
		} else {
			PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
					dev->data->port_id);
		}
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq) {
			rxq->pkts = 0;
			rxq->bytes = 0;
			rxq->errors = 0;
			rxq->rx_mbuf_alloc_failed = 0;
		} else {
			PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
					dev->data->port_id);
		}
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq) {
			txq->pkts = 0;
			txq->bytes = 0;
			txq->errors = 0;
		} else {
			PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
					dev->data->port_id);
		}
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM  |
		DEV_RX_OFFLOAD_JUMBO_FRAME	|
		DEV_RX_OFFLOAD_SCATTER	  |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM  |
		DEV_TX_OFFLOAD_UDP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}

static int
axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;
	reg_val = AXGMAC_IOREAD(pdata, reg);
	fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
	fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
	fc.autoneg = pdata->pause_autoneg;

	if (pdata->rx_pause && pdata->tx_pause)
		fc.mode = RTE_FC_FULL;
	else if (pdata->rx_pause)
		fc.mode = RTE_FC_RX_PAUSE;
	else if (pdata->tx_pause)
		fc.mode = RTE_FC_TX_PAUSE;
	else
		fc.mode = RTE_FC_NONE;

	fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
	fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
	fc_conf->pause_time = fc.pause_time[0];
	fc_conf->send_xon = fc.send_xon;
	fc_conf->mode = fc.mode;

	return 0;
}

static int
axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;

	pdata->pause_autoneg = fc_conf->autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = fc_conf->send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
	AXGMAC_IOWRITE(pdata, reg, reg_val);
	fc.mode = fc_conf->mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}
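
/*
 * Illustrative usage (not part of the driver): requesting full-duplex
 * pause through the generic API, which lands in axgbe_flow_ctrl_set().
 * The watermarks appear to be taken in KB here (they are scaled by 1024
 * above); the values are placeholders.
 *
 *	struct rte_eth_fc_conf fc_conf = { 0 };
 *
 *	fc_conf.mode = RTE_FC_FULL;
 *	fc_conf.high_water = 10;
 *	fc_conf.low_water = 4;
 *	fc_conf.pause_time = 0xffff;
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */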

static int
axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	uint8_t tc_num;

	tc_num = pdata->pfc_map[pfc_conf->priority];

	if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
		PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n",
				pdata->hw_feat.tc_cnt);
		return -EINVAL;
	}

	pdata->pause_autoneg = pfc_conf->fc.autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = pfc_conf->fc.send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));

	switch (tc_num) {
	case 0:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC0, pfc_conf->fc.pause_time);
		break;
	case 1:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC1, pfc_conf->fc.pause_time);
		break;
	case 2:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC2, pfc_conf->fc.pause_time);
		break;
	case 3:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC3, pfc_conf->fc.pause_time);
		break;
	case 4:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC4, pfc_conf->fc.pause_time);
		break;
	case 5:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC5, pfc_conf->fc.pause_time);
		break;
	case 6:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC6, pfc_conf->fc.pause_time);
		break;
	case 7:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC7, pfc_conf->fc.pause_time);
		break;
	}

	fc.mode = pfc_conf->fc.mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);
	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

void
axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct axgbe_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];
	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_desc;
	qinfo->conf.rx_free_thresh = rxq->free_thresh;
}

void
axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct axgbe_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];
	qinfo->nb_desc = txq->nb_desc;
	qinfo->conf.tx_free_thresh = txq->free_thresh;
}

const uint32_t *
axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == axgbe_recv_pkts)
		return ptypes;
	return NULL;
}

static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	struct axgbe_port *pdata = dev->data->dev_private;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	unsigned int val = 0;

	axgbe_dev_info_get(dev, &dev_info);
	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;
	/* mtu setting is forbidden if the port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
				dev->data->port_id);
		return -EBUSY;
	}
	if (frame_size > RTE_ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		val = 1;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		val = 0;
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	return 0;
}
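
/*
 * Illustrative usage (not part of the driver): changing the MTU through
 * the generic API. As enforced above, the port must be stopped first.
 *
 *	rte_eth_dev_stop(port_id);
 *	rte_eth_dev_set_mtu(port_id, 9000);
 *	rte_eth_dev_start(port_id);
 */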

static void
axgbe_update_tstamp_time(struct axgbe_port *pdata,
		unsigned int sec, unsigned int nsec, int addsub)
{
	unsigned int count = 100;
	uint32_t sub_val = 0;
	uint32_t sub_val_sec = 0xFFFFFFFF;
	uint32_t sub_val_nsec = 0x3B9ACA00;

	if (addsub) {
		if (sec)
			sub_val = sub_val_sec - (sec - 1);
		else
			sub_val = sec;

		AXGMAC_IOWRITE(pdata, MAC_STSUR, sub_val);
		sub_val = sub_val_nsec - nsec;
		AXGMAC_IOWRITE(pdata, MAC_STNUR, sub_val);
		AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 1);
	} else {
		AXGMAC_IOWRITE(pdata, MAC_STSUR, sec);
		AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 0);
		AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1);
	/* Wait for time update to complete */
	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
		rte_delay_ms(1);
}

static inline uint64_t
div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

static inline uint64_t
div_u64(uint64_t dividend, uint32_t divisor)
{
	uint32_t remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
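
/*
 * div_u64_rem()/div_u64() mirror the Linux kernel helpers of the same
 * name so the timestamp math below reads like the kernel driver. A small
 * worked example (values are illustrative only):
 *
 *	uint32_t rem;
 *	uint64_t sec = div_u64_rem(1234567890ULL, 1000000000U, &rem);
 *	// sec == 1, rem == 234567890
 */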

static int
axgbe_adjfreq(struct axgbe_port *pdata, int64_t delta)
{
	uint64_t adjust;
	uint32_t addend, diff;
	unsigned int neg_adjust = 0;

	if (delta < 0) {
		neg_adjust = 1;
		delta = -delta;
	}
	adjust = (uint64_t)pdata->tstamp_addend;
	adjust *= delta;
	diff = (uint32_t)div_u64(adjust, 1000000000UL);
	addend = (neg_adjust) ? pdata->tstamp_addend - diff :
				pdata->tstamp_addend + diff;
	pdata->tstamp_addend = addend;
	axgbe_update_tstamp_addend(pdata, addend);
	return 0;
}
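
/*
 * Worked example for the adjustment above, treating delta as parts per
 * billion as in the kernel's adjfreq callbacks (values are assumptions):
 * with tstamp_addend = 0x19999999 (429496729) and delta = 100,
 * adjust = 429496729 * 100 = 42949672900 and diff = adjust / 10^9 = 42,
 * so the addend grows by 42 and the hardware clock runs ~100 ppb faster.
 */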

static int
axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct timespec timestamp_delta;

	axgbe_adjfreq(pdata, delta);
	pdata->systime_tc.nsec += delta;

	if (delta < 0) {
		delta = -delta;
		timestamp_delta = rte_ns_to_timespec(delta);
		axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec,
				timestamp_delta.tv_nsec, 1);
	} else {
		timestamp_delta = rte_ns_to_timespec(delta);
		axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec,
				timestamp_delta.tv_nsec, 0);
	}
	return 0;
}

static int
axgbe_timesync_read_time(struct rte_eth_dev *dev,
		struct timespec *timestamp)
{
	uint64_t nsec;
	struct axgbe_port *pdata = dev->data->dev_private;

	nsec = AXGMAC_IOREAD(pdata, MAC_STSR);
	nsec *= NSEC_PER_SEC;
	nsec += AXGMAC_IOREAD(pdata, MAC_STNR);
	*timestamp = rte_ns_to_timespec(nsec);
	return 0;
}

static int
axgbe_timesync_write_time(struct rte_eth_dev *dev,
				    const struct timespec *timestamp)
{
	unsigned int count = 100;
	struct axgbe_port *pdata = dev->data->dev_private;

	AXGMAC_IOWRITE(pdata, MAC_STSUR, timestamp->tv_sec);
	AXGMAC_IOWRITE(pdata, MAC_STNUR, timestamp->tv_nsec);
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1);
	/* Wait for time update to complete */
	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
		rte_delay_ms(1);
	if (!count)
		PMD_DRV_LOG(ERR, "Timed out updating timestamp\n");
	return 0;
}

static void
axgbe_update_tstamp_addend(struct axgbe_port *pdata,
		uint32_t addend)
{
	unsigned int count = 100;

	AXGMAC_IOWRITE(pdata, MAC_TSAR, addend);
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

	/* Wait for addend update to complete */
	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
		rte_delay_ms(1);
	if (!count)
		PMD_DRV_LOG(ERR, "Timed out updating timestamp addend register\n");
}

static void
axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
		unsigned int nsec)
{
	unsigned int count = 100;

	/* System Time Sec Update */
	AXGMAC_IOWRITE(pdata, MAC_STSUR, sec);
	/* System Time nanoSec Update */
	AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	/* Initialize Timestamp */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

	/* Wait for time update to complete */
	while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
		rte_delay_ms(1);
	if (!count)
		PMD_DRV_LOG(ERR, "Timed out initializing timestamp\n");
}

static int
axgbe_timesync_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int mac_tscr = 0;
	uint64_t dividend;
	struct timespec timestamp;
	uint64_t nsec;

	/* Set one nano-second accuracy */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

	/* Set fine timestamp update */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

	/* Overwrite earlier timestamps */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

	AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Enable processing of PTP packets over Ethernet */
1668 	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
1669 	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1670 	/* Enable timestamp for all pkts*/
1671 	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
1672 
1673 	/* enabling timestamp */
1674 	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1675 	AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
1676 
1677 	/* Exit if timestamping is not enabled */
1678 	if (!AXGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) {
1679 		PMD_DRV_LOG(ERR, "Exiting as timestamp is not enabled\n");
1680 		return 0;
1681 	}
1682 
1683 	/* Sub-second Increment Value*/
1684 	AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, AXGBE_TSTAMP_SSINC);
1685 	/* Sub-nanosecond Increment Value */
1686 	AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, AXGBE_TSTAMP_SNSINC);
1687 
1688 	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
1689 	dividend = 50000000;
1690 	dividend <<= 32;
1691 	pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
1692 
1693 	axgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
1694 	axgbe_set_tstamp_time(pdata, 0, 0);
1695 
1696 	/* Initialize the timecounter */
1697 	memset(&pdata->systime_tc, 0, sizeof(struct rte_timecounter));
1698 
1699 	pdata->systime_tc.cc_mask = AXGBE_CYCLECOUNTER_MASK;
1700 	pdata->systime_tc.cc_shift = 0;
1701 	pdata->systime_tc.nsec_mask = 0;
1702 
1703 	PMD_DRV_LOG(DEBUG, "Initializing system time counter with realtime\n");
1704 
1705 	/* Update the counter once with the current CLOCK_REALTIME value */
1706 	clock_gettime(CLOCK_REALTIME, &timestamp);
1707 	nsec = rte_timespec_to_ns(&timestamp);
1708 	nsec = rte_timecounter_update(&pdata->systime_tc, nsec);
1709 	axgbe_set_tstamp_time(pdata, timestamp.tv_sec, timestamp.tv_nsec);
1710 	return 0;
1711 }
1712 
1713 static int
1714 axgbe_timesync_disable(struct rte_eth_dev *dev)
1715 {
1716 	struct axgbe_port *pdata = dev->data->dev_private;
1717 	unsigned int mac_tscr = 0;
1718 
1719 	/* Disable timestamping for all packets */
1720 	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 0);
1721 	/* Disable updates through the addend register */
1722 	AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 0);
1723 	/* Disable fine timestamp updates */
1724 	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 0);
1725 	/* Disable timestamping */
1726 	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 0);
	/* mac_tscr is only a local copy: write the cleared bits back so the
	 * hardware actually stops timestamping.
	 */
	AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
1727 	return 0;
1728 }
1729 
1730 static int
1731 axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
1732 				struct timespec *timestamp, uint32_t flags)
1733 {
1734 	uint64_t nsec = 0;
1735 	volatile union axgbe_rx_desc *desc;
1736 	uint16_t idx, pmt;
1737 	struct axgbe_rx_queue *rxq = *dev->data->rx_queues;
1738 
1739 	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
1740 	desc = &rxq->desc[idx];
1741 
1742 	while (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
1743 		rte_delay_ms(1);
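	/*
	 * Only Rx queue 0 is inspected here.  When the received packet
	 * carried a timestamp, the hardware writes an Rx context descriptor
	 * whose desc0/desc1 words hold the nanoseconds and seconds parts;
	 * an all-ones value means the timestamp is not valid.
	 */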
1744 	if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CTXT)) {
1745 		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_CONTEXT_DESC3, TSA) &&
1746 				!AXGMAC_GET_BITS_LE(desc->write.desc3,
1747 					RX_CONTEXT_DESC3, TSD)) {
1748 			pmt = AXGMAC_GET_BITS_LE(desc->write.desc3,
1749 					RX_CONTEXT_DESC3, PMT);
1750 			nsec = rte_le_to_cpu_32(desc->write.desc1);
1751 			nsec *= NSEC_PER_SEC;
1752 			nsec += rte_le_to_cpu_32(desc->write.desc0);
1753 			if (nsec != 0xffffffffffffffffULL) {
1754 				if (pmt == 0x01)
1755 					*timestamp = rte_ns_to_timespec(nsec);
1756 				PMD_DRV_LOG(DEBUG,
1757 					"flags = 0x%x nsec = %"PRIu64"\n",
1758 					flags, nsec);
1759 			}
1760 		}
1761 	}
1762 
1763 	return 0;
1764 }
1765 
1766 static int
1767 axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
1768 				struct timespec *timestamp)
1769 {
1770 	uint64_t nsec;
1771 	struct axgbe_port *pdata = dev->data->dev_private;
1772 	unsigned int tx_snr, tx_ssr;
1773 
1774 	rte_delay_us(5);
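	/*
	 * Read order matters: reading MAC_TXSNR normally releases the
	 * snapshot registers, so TXSSR is read first; parts flagged with
	 * tx_tstamp_workaround are presumed to need the reversed order.
	 */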
1775 	if (pdata->vdata->tx_tstamp_workaround) {
1776 		tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR);
1777 		tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR);
1779 	} else {
1780 		tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR);
1781 		tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR);
1782 	}
1783 	if (AXGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) {
1784 		PMD_DRV_LOG(DEBUG, "No Tx timestamp available (TXTSSTSMIS set)\n");
1785 		return 0;
1786 	}
1787 	nsec = tx_ssr;
1788 	nsec *= NSEC_PER_SEC;
1789 	nsec += tx_snr;
1790 	PMD_DRV_LOG(DEBUG, "nsec = %"PRIu64" tx_ssr = %u tx_snr = %u\n",
1791 			nsec, tx_ssr, tx_snr);
1792 	*timestamp = rte_ns_to_timespec(nsec);
1793 	return 0;
1794 }
1795 
1796 static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
1797 {
1798 	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
1799 	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
1800 
1801 	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
1802 	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
1803 	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
1804 
1805 	memset(hw_feat, 0, sizeof(*hw_feat));
1806 
1807 	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);
1808 
1809 	/* Hardware feature register 0 */
1810 	hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
1811 	hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
1812 	hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
1813 	hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
1814 	hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
1815 	hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
1816 	hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
1817 	hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
1818 	hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
1819 	hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
1820 	hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
1821 	hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
1822 					      ADDMACADRSEL);
1823 	hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
1824 	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
1825 
1826 	/* Hardware feature register 1 */
1827 	hw_feat->rx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
1828 						RXFIFOSIZE);
1829 	hw_feat->tx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
1830 						TXFIFOSIZE);
1831 	hw_feat->adv_ts_hi     = AXGMAC_GET_BITS(mac_hfr1,
1832 						 MAC_HWF1R, ADVTHWORD);
1833 	hw_feat->dma_width     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
1834 	hw_feat->dcb           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
1835 	hw_feat->sph           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
1836 	hw_feat->tso           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
1837 	hw_feat->dma_debug     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
1838 	hw_feat->rss           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
1839 	hw_feat->tc_cnt	       = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
1840 	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
1841 						  HASHTBLSZ);
1842 	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
1843 						  L3L4FNUM);
1844 
1845 	/* Hardware feature register 2 */
1846 	hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
1847 	hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
1848 	hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
1849 	hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
1850 	hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
1851 	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
1852 						AUXSNAPNUM);
1853 
1854 	/* Translate the hash table size field into the actual entry count */
1855 	switch (hw_feat->hash_table_size) {
1856 	case 0:
1857 		break;
1858 	case 1:
1859 		hw_feat->hash_table_size = 64;
1860 		break;
1861 	case 2:
1862 		hw_feat->hash_table_size = 128;
1863 		break;
1864 	case 3:
1865 		hw_feat->hash_table_size = 256;
1866 		break;
1867 	}
1868 
1869 	/* Translate the address width setting into actual number */
1870 	switch (hw_feat->dma_width) {
1871 	case 0:
1872 		hw_feat->dma_width = 32;
1873 		break;
1874 	case 1:
1875 		hw_feat->dma_width = 40;
1876 		break;
1877 	case 2:
1878 		hw_feat->dma_width = 48;
1879 		break;
1880 	default:
1881 		hw_feat->dma_width = 32;
1882 	}
1883 
1884 	/* The Queue, Channel and TC counts are zero-based so increment them
1885 	 * to get the actual number
1886 	 */
1887 	hw_feat->rx_q_cnt++;
1888 	hw_feat->tx_q_cnt++;
1889 	hw_feat->rx_ch_cnt++;
1890 	hw_feat->tx_ch_cnt++;
1891 	hw_feat->tc_cnt++;
1892 
1893 	/* Translate the encoded FIFO sizes into byte counts: size = 1 << (n + 7) */
1894 	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
1895 	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
1896 }
1897 
1898 static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
1899 {
1900 	axgbe_init_function_ptrs_dev(&pdata->hw_if);
1901 	axgbe_init_function_ptrs_phy(&pdata->phy_if);
1902 	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
1903 	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
1904 }
1905 
1906 static void axgbe_set_counts(struct axgbe_port *pdata)
1907 {
1908 	/* Set all the function pointers */
1909 	axgbe_init_all_fptrs(pdata);
1910 
1911 	/* Populate the hardware features */
1912 	axgbe_get_all_hw_features(pdata);
1913 
1914 	/* Set default max values if not provided */
1915 	if (!pdata->tx_max_channel_count)
1916 		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
1917 	if (!pdata->rx_max_channel_count)
1918 		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
1919 
1920 	if (!pdata->tx_max_q_count)
1921 		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
1922 	if (!pdata->rx_max_q_count)
1923 		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
1924 
1925 	/* Calculate the number of Tx and Rx rings to be created
1926 	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
1927 	 *   the number of Tx queues to the number of Tx channels
1928 	 *   enabled
1929 	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
1930 	 *   number of Rx queues or maximum allowed
1931 	 */
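	/*
	 * Illustrative example (hypothetical values): with
	 * hw_feat.tx_ch_cnt = 8 and a MAX_TX_DMA property of 4, four Tx
	 * rings (and therefore four Tx queues) are created.
	 */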
1932 	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
1933 				     pdata->tx_max_channel_count);
1934 	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
1935 				     pdata->tx_max_q_count);
1936 
1937 	pdata->tx_q_count = pdata->tx_ring_count;
1938 
1939 	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
1940 				     pdata->rx_max_channel_count);
1941 
1942 	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
1943 				  pdata->rx_max_q_count);
1944 }
1945 
1946 static void axgbe_default_config(struct axgbe_port *pdata)
1947 {
1948 	pdata->pblx8 = DMA_PBL_X8_ENABLE;
1949 	pdata->tx_sf_mode = MTL_TSF_ENABLE;
1950 	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
1951 	pdata->tx_pbl = DMA_PBL_32;
1952 	pdata->tx_osp_mode = DMA_OSP_ENABLE;
1953 	pdata->rx_sf_mode = MTL_RSF_ENABLE;
1954 	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
1955 	pdata->rx_pbl = DMA_PBL_32;
1956 	pdata->pause_autoneg = 1;
1957 	pdata->tx_pause = 0;
1958 	pdata->rx_pause = 0;
1959 	pdata->phy_speed = SPEED_UNKNOWN;
1960 	pdata->power_down = 0;
1961 }
1962 
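/*
 * Comparator used with the bus find_device() callback: it must return 0
 * on a match and non-zero otherwise, hence the inverted return values.
 */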
1963 static int
1964 pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
1965 {
1966 	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
1967 	const struct rte_pci_id *pcid = _pci_id;
1968 
1969 	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
1970 			pdev->id.device_id == pcid->device_id)
1971 		return 0;
1972 	return 1;
1973 }
1974 
1975 static bool
1976 pci_search_device(int device_id)
1977 {
1978 	struct rte_bus *pci_bus;
1979 	struct rte_pci_id dev_id;
1980 
1981 	dev_id.device_id = device_id;
1982 	pci_bus = rte_bus_find_by_name("pci");
1983 	return (pci_bus != NULL) &&
1984 		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
1985 }
1986 
1987 /*
1988  * Initialize the ethdev private data and hardware. Returns 0 on success.
1989  */
1990 static int
1991 eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
1992 {
1993 	PMD_INIT_FUNC_TRACE();
1994 	struct axgbe_port *pdata;
1995 	struct rte_pci_device *pci_dev;
1996 	uint32_t reg, mac_lo, mac_hi;
1997 	uint32_t len;
1998 	int ret;
1999 
2000 	eth_dev->dev_ops = &axgbe_eth_dev_ops;
2001 
2002 	eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status;
2003 	eth_dev->tx_descriptor_status = axgbe_dev_tx_descriptor_status;
2004 
2005 	/*
2006 	 * For secondary processes, we don't initialise any further as primary
2007 	 * has already done this work.
2008 	 */
2009 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2010 		return 0;
2011 
2012 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2013 
2014 	pdata = eth_dev->data->dev_private;
2015 	/* initial state */
2016 	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
2017 	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
2018 	pdata->eth_dev = eth_dev;
2019 
2020 	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
2021 	pdata->pci_dev = pci_dev;
2022 
2023 	/*
2024 	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
2025 	 */
2026 	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
2027 		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
2028 		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
2029 	} else {
2030 		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
2031 		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
2032 	}
2033 
2034 	pdata->xgmac_regs =
2035 		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
2036 	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
2037 				     + AXGBE_MAC_PROP_OFFSET);
2038 	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
2039 				    + AXGBE_I2C_CTRL_OFFSET);
2040 	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
2041 
2042 	/* Version-specific driver data */
2043 	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
2044 		pdata->vdata = &axgbe_v2a;
2045 	else
2046 		pdata->vdata = &axgbe_v2b;
2047 
2048 	/* Configure the PCS indirect addressing support */
2049 	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
2050 	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
2051 	pdata->xpcs_window <<= 6;
2052 	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
2053 	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
2054 	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
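	/*
	 * PCS registers are accessed through a sliding window: the upper
	 * bits of a register offset are written to the window-select
	 * register and the lower bits (offset & xpcs_window_mask) index
	 * into the mapped window of 2^(SIZE + 7) bytes.
	 */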
2055 
2056 	PMD_INIT_LOG(DEBUG,
2057 		     "xpcs window: %x, size: %x, mask: %x", pdata->xpcs_window,
2058 		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
2059 	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
2060 
2061 	/* Retrieve the MAC address */
2062 	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
2063 	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
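	/* The permanent MAC address is stored little-endian: bytes 0-3 in
	 * XP_MAC_ADDR_LO and bytes 4-5 in the low half of XP_MAC_ADDR_HI.
	 */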
2064 	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
2065 	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
2066 	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
2067 	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
2068 	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
2069 	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
2070 
2071 	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
2072 	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);
2073 
2074 	if (!eth_dev->data->mac_addrs) {
2075 		PMD_INIT_LOG(ERR,
2076 			     "Failed to allocate %u bytes needed to "
2077 			     "store MAC addresses", len);
2078 		return -ENOMEM;
2079 	}
2080 
2081 	/* Allocate memory for storing hash filter MAC addresses */
2082 	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
2083 	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
2084 						    len, 0);
2085 
2086 	if (eth_dev->data->hash_mac_addrs == NULL) {
2087 		PMD_INIT_LOG(ERR,
2088 			     "Failed to allocate %u bytes needed to "
2089 			     "store hash MAC addresses", len);
		/* Do not leak the MAC address array allocated above */
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
2090 		return -ENOMEM;
2091 	}
2092 
2093 	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
2094 		rte_eth_random_addr(pdata->mac_addr.addr_bytes);
2095 
2096 	/* Copy the permanent MAC address */
2097 	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
2098 
2099 	/* Clock settings */
2100 	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
2101 	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
2102 
2103 	/* Set the DMA coherency values */
2104 	pdata->coherent = 1;
2105 	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
2106 	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
2107 	pdata->awcache = AXGBE_DMA_OS_AWCACHE;
2108 
2109 	/* Set the maximum channels and queues */
2110 	reg = XP_IOREAD(pdata, XP_PROP_1);
2111 	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
2112 	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
2113 	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
2114 	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
2115 
2116 	/* Set the hardware channel and queue counts */
2117 	axgbe_set_counts(pdata);
2118 
2119 	/* Set the maximum FIFO sizes (XP_PROP_2 reports them in 16 KB units) */
2120 	reg = XP_IOREAD(pdata, XP_PROP_2);
2121 	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
2122 	pdata->tx_max_fifo_size *= 16384;
2123 	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
2124 					  pdata->vdata->tx_max_fifo_size);
2125 	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
2126 	pdata->rx_max_fifo_size *= 16384;
2127 	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
2128 					  pdata->vdata->rx_max_fifo_size);
2129 	/* Issue software reset to DMA */
2130 	ret = pdata->hw_if.exit(pdata);
2131 	if (ret)
2132 		PMD_DRV_LOG(ERR, "hw_if->exit failed (EBUSY)\n");
2133 
2134 	/* Set default configuration data */
2135 	axgbe_default_config(pdata);
2136 
2137 	/* Set default max values if not provided */
2138 	if (!pdata->tx_max_fifo_size)
2139 		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
2140 	if (!pdata->rx_max_fifo_size)
2141 		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
2142 
2143 	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
2144 	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
2145 	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
2146 	pthread_mutex_init(&pdata->i2c_mutex, NULL);
2147 	pthread_mutex_init(&pdata->an_mutex, NULL);
2148 	pthread_mutex_init(&pdata->phy_mutex, NULL);
2149 
2150 	ret = pdata->phy_if.phy_init(pdata);
2151 	if (ret) {
2152 		rte_free(eth_dev->data->mac_addrs);
2153 		eth_dev->data->mac_addrs = NULL;
2154 		return ret;
2155 	}
2156 
2157 	rte_intr_callback_register(&pci_dev->intr_handle,
2158 				   axgbe_dev_interrupt_handler,
2159 				   (void *)eth_dev);
2160 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
2161 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
2162 		     pci_dev->id.device_id);
2163 
2164 	return 0;
2165 }
2166 
2167 static int
2168 axgbe_dev_close(struct rte_eth_dev *eth_dev)
2169 {
2170 	struct rte_pci_device *pci_dev;
2171 
2172 	PMD_INIT_FUNC_TRACE();
2173 
2174 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2175 		return 0;
2176 
2177 	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
2178 	axgbe_dev_clear_queues(eth_dev);
2179 
2180 	/* disable uio intr before callback unregister */
2181 	rte_intr_disable(&pci_dev->intr_handle);
2182 	rte_intr_callback_unregister(&pci_dev->intr_handle,
2183 				     axgbe_dev_interrupt_handler,
2184 				     (void *)eth_dev);
2185 
2186 	return 0;
2187 }
2188 
2189 static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2190 	struct rte_pci_device *pci_dev)
2191 {
2192 	return rte_eth_dev_pci_generic_probe(pci_dev,
2193 		sizeof(struct axgbe_port), eth_axgbe_dev_init);
2194 }
2195 
2196 static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
2197 {
2198 	return rte_eth_dev_pci_generic_remove(pci_dev, axgbe_dev_close);
2199 }
2200 
2201 static struct rte_pci_driver rte_axgbe_pmd = {
2202 	.id_table = pci_id_axgbe_map,
2203 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
2204 	.probe = eth_axgbe_pci_probe,
2205 	.remove = eth_axgbe_pci_remove,
2206 };
2207 
2208 RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
2209 RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
2210 RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
2211 RTE_LOG_REGISTER(axgbe_logtype_init, pmd.net.axgbe.init, NOTICE);
2212 RTE_LOG_REGISTER(axgbe_logtype_driver, pmd.net.axgbe.driver, NOTICE);
2213