/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int  axgbe_dev_configure(struct rte_eth_dev *dev);
static int  axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr,
				  uint32_t index,
				  uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
				       struct rte_ether_addr *mac_addr,
				       uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
					   uint8_t add);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
			      struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats,
				unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
			   const uint64_t *ids,
			   uint64_t *values,
			   unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int  axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);
static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var)				\
	{ _string,						\
	  offsetof(struct axgbe_mmc_stats, _var),		\
	}

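/*
 * Illustrative note (not part of the upstream driver): each table entry
 * below pairs an xstat name with its byte offset inside struct
 * axgbe_mmc_stats, so values can be fetched generically with pointer
 * arithmetic. For example, AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb)
 * expands to:
 *
 *	{ "tx_bytes", offsetof(struct axgbe_mmc_stats, txoctetcount_gb), }
 *
 * and a reader such as axgbe_dev_xstats_get() picks the value up as:
 *
 *	uint64_t v = *(uint64_t *)((uint8_t *)&pdata->mmc_stats +
 *				   axgbe_xstats_strings[i].offset);
 */
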
static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT        ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID		0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A	0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B	0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set         = axgbe_dev_mac_addr_set,
	.mac_addr_add         = axgbe_dev_mac_addr_add,
	.mac_addr_remove      = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list     = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set    = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update          = axgbe_dev_link_update,
	.get_reg	      = axgbe_dev_get_regs,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.xstats_get	      = axgbe_dev_xstats_get,
	.xstats_reset	      = axgbe_dev_xstats_reset,
	.xstats_get_names     = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
	.flow_ctrl_get        = axgbe_flow_ctrl_get,
	.flow_ctrl_set        = axgbe_flow_ctrl_set,
};

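/*
 * Illustrative sketch (hypothetical application code, not part of this
 * driver): the ops table above is dispatched through the generic ethdev
 * API, so a minimal caller sequence exercising it looks like:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);	// axgbe_dev_configure
 *	rte_eth_rx_queue_setup(port_id, 0, 512,
 *			       rte_eth_dev_socket_id(port_id),
 *			       NULL, mb_pool);		// axgbe_dev_rx_queue_setup
 *	rte_eth_tx_queue_setup(port_id, 0, 512,
 *			       rte_eth_dev_socket_id(port_id),
 *			       NULL);			// axgbe_dev_tx_queue_setup
 *	rte_eth_dev_start(port_id);			// axgbe_dev_start
 */
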
static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by the NIC to handle device-specific
 * interrupts.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure the device: record whether Rx checksum offload to hardware
 * was requested. It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

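/*
 * Illustrative sketch (hypothetical application code): the Rx checksum
 * offload checked above is requested through the port configuration
 * passed to rte_eth_dev_configure(), e.g.:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.offloads = DEV_RX_OFFLOAD_CHECKSUM,
 *		},
 *	};
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */
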
static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

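/*
 * Illustrative sketch (hypothetical application code): only ETH_MQ_RX_RSS
 * and ETH_MQ_RX_NONE are accepted above, so an application spreading
 * traffic over several Rx queues would configure along these lines:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *		.rx_adv_conf = {
 *			.rss_conf = { .rss_hf = ETH_RSS_IP },
 *		},
 *	};
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */
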
static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* PHY start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}

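/*
 * Illustrative note: scattered Rx is selected above either explicitly via
 * DEV_RX_OFFLOAD_SCATTER or implicitly whenever max_rx_pkt_len exceeds a
 * single Rx buffer. With a hypothetical 2048-byte rx_buf_size, a jumbo
 * max_rx_pkt_len of 9000 sets dev_data->scattered_rx = 1 and each such
 * frame arrives as an mbuf chain:
 *
 *	9000 bytes / 2048 bytes per buffer -> 5 mbuf segments
 */
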
/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Set Default MAC Addr */
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		       uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %u\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}

static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %u\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}

static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1; /* 0 is always default mac */
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid multicast address count %u\n",
			    nb_mc_addr);
		return -EINVAL;
	}

	/* clear unicast addresses */
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}

static int
axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);

	if (pdata->uc_hash_mac_addr > 0) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

static int
axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	for (index = 0; index < pdata->hash_table_count; index++) {
		if (add)
			pdata->uc_hash_table[index] = ~0;
		else
			pdata->uc_hash_table[index] = 0;

		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
			    add ? "set" : "clear", index);

		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
			       pdata->uc_hash_table[index]);
	}

	if (add) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

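/*
 * Illustrative sketch (hypothetical application code): the update above is
 * normally driven through the generic API, which then reports the snapshot
 * stored by rte_eth_linkstatus_set():
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);	// axgbe_dev_link_update
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("link up, %u Mbps\n", link.link_speed);
 */
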
static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}

static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

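/*
 * Illustrative note: every 64-bit MMC counter is exposed by hardware as a
 * LO/HI register pair and accumulated above as value = LO + (HI << 32).
 * With hypothetical reads LO = 0x00001000 and HI = 0x2:
 *
 *	0x00001000 + (0x2ULL << 32) = 0x0000000200001000
 *
 * Freezing the counters first (MMC_CR.MCF = 1) keeps the two halves of
 * each pair consistent while they are read.
 */
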
static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}

static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}

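/*
 * Illustrative sketch (hypothetical application code): the usual two-call
 * pattern sizes the arrays first, then fetches names and values:
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *	struct rte_eth_xstat *xstats = calloc(n, sizeof(*xstats));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, xstats, n);	// axgbe_dev_xstats_get
 */
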
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}

static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
		stats->ierrors += rxq->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->rx_mbuf_alloc_failed = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM  |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER	  |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM  |
		DEV_TX_OFFLOAD_UDP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}

static int
axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;
	reg_val = AXGMAC_IOREAD(pdata, reg);
	fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
	fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
	fc.autoneg = pdata->pause_autoneg;

	if (pdata->rx_pause && pdata->tx_pause)
		fc.mode = RTE_FC_FULL;
	else if (pdata->rx_pause)
		fc.mode = RTE_FC_RX_PAUSE;
	else if (pdata->tx_pause)
		fc.mode = RTE_FC_TX_PAUSE;
	else
		fc.mode = RTE_FC_NONE;

	fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
	fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
	fc_conf->pause_time = fc.pause_time[0];
	fc_conf->send_xon = fc.send_xon;
	fc_conf->mode = fc.mode;

	return 0;
}

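/*
 * Illustrative note: the conversion above maps the RFA/RFD register fields
 * to the KB units used by rte_eth_fc_conf as (1024 + field * 512) / 1024.
 * For a hypothetical field value of 7:
 *
 *	(1024 + (7 << 9)) / 1024 = (1024 + 3584) / 1024 = 4 KB
 *
 * with the integer division rounding down.
 */
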
static int
axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;

	pdata->pause_autoneg = fc_conf->autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = fc_conf->send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
	AXGMAC_IOWRITE(pdata, reg, reg_val);
	fc.mode = fc_conf->mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					       ADDMACADRSEL);
	hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						 RXFIFOSIZE);
	hw_feat->tx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						 TXFIFOSIZE);
	hw_feat->adv_ts_hi     = AXGMAC_GET_BITS(mac_hfr1,
						 MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt	       = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

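/*
 * Illustrative note: the FIFO size fields in MAC_HWF1R are log2-encoded,
 * decoded by the last two lines above as size = 1 << (n + 7) bytes. For
 * an encoded value of 9:
 *
 *	1 << (9 + 7) = 65536 bytes (64 KB)
 */
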
static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

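/*
 * Illustrative note: the ring counts above reduce to a min() chain. With
 * hypothetical hardware reporting tx_ch_cnt = 8 under platform limits
 * tx_max_channel_count = 4 and tx_max_q_count = 8:
 *
 *	tx_ring_count = RTE_MIN(RTE_MIN(8, 4), 8) = 4
 *	tx_q_count    = 4
 */
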
static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}

/*
 * Initialise the device private data. It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* Version-specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);

	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

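/*
 * Illustrative usage note (standard DPDK workflow, shown as an example):
 * the PMD registers itself as "net_axgbe" and, per the kmod dependency
 * above, expects the device to be bound to igb_uio, uio_pci_generic or
 * vfio-pci before EAL initialization, e.g.:
 *
 *	usertools/dpdk-devbind.py --bind=vfio-pci <pci-address>
 *
 * The log types registered below can be raised at runtime with an EAL
 * option such as --log-level=pmd.net.axgbe.init:debug.
 */
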
RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}