xref: /dpdk/drivers/net/axgbe/axgbe_ethdev.c (revision 573ef95dc7c9e4991da41fa1fc3d45c5c4076deb)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int  axgbe_dev_configure(struct rte_eth_dev *dev);
static int  axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
			      struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats,
				unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
			   const uint64_t *ids,
			   uint64_t *values,
			   unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int  axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var)				\
	{ _string,						\
	  offsetof(struct axgbe_mmc_stats, _var),		\
	}
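
/*
 * Illustrative note (a sketch, not driver logic): each table entry below
 * pairs a display name with a byte offset into struct axgbe_mmc_stats, so
 * a stat value can be fetched generically, e.g.:
 *
 *	val = *(uint64_t *)((uint8_t *)&pdata->mmc_stats +
 *			    axgbe_xstats_strings[i].offset);
 *
 * which is exactly how axgbe_dev_xstats_get() below consumes the table.
 */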

static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT        ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID       0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};
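
/*
 * Note: the v2a and v2b variants above differ only in their maximum
 * Tx/Rx fifo sizes (224 KB vs. 64 KB); all other capabilities match.
 */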

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.link_update          = axgbe_dev_link_update,
	.get_reg	      = axgbe_dev_get_regs,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.xstats_get	      = axgbe_dev_xstats_get,
	.xstats_reset	      = axgbe_dev_xstats_reset,
	.xstats_get_names     = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
};
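
/*
 * The table above backs the generic rte_eth_* API: for example,
 * rte_eth_dev_start(port_id) dispatches to axgbe_dev_start() and
 * rte_eth_stats_get(port_id, &stats) to axgbe_dev_stats_get()
 * through these callbacks.
 */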

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by the NIC for handling
 * specific interrupts.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since they are disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}
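
/*
 * A minimal sketch of the two-call protocol axgbe_dev_get_regs()
 * implements via rte_eth_dev_get_reg_info() (buffer handling here is
 * illustrative, not driver code):
 *
 *	struct rte_dev_reg_info info = { .data = NULL };
 *	rte_eth_dev_get_reg_info(port_id, &info);	// learn the count
 *	info.data = malloc(info.length * info.width);	// then dump all
 *	rte_eth_dev_get_reg_info(port_id, &info);
 */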

static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

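	/*
	 * Each 64-bit counter is exposed by the hardware as a 32-bit
	 * LO/HI register pair: the low half is accumulated first, then
	 * the high half is shifted into place, as in
	 *
	 *	stats->foo += AXGMAC_IOREAD(pdata, MMC_FOO_LO);
	 *	stats->foo += (uint64_t)AXGMAC_IOREAD(pdata, MMC_FOO_HI) << 32;
	 *
	 * (MMC_FOO_* is a placeholder name; the real register pairs
	 * follow below.)
	 */
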
	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}

static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}
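
/*
 * A minimal sketch of how an application consumes the xstats callbacks
 * above through the generic ethdev API (allocation is illustrative):
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *	struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, vals, n);
 */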

static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}

static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
		stats->ierrors += rxq->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->rx_mbuf_alloc_failed = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM  |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM  |
		DEV_TX_OFFLOAD_UDP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = AXGMAC_GET_BITS(mac_hfr1,
						 MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt	       = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
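	/*
	 * The register encodes the fifo size as log2(bytes) - 7, so an
	 * encoded value of 7, for example, translates to
	 * 1 << (7 + 7) = 16384 bytes.
	 */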
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				     pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				     pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				     pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				  pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
			pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}
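
/*
 * Usage note: pci_search_device() scans the PCI bus for any AMD device
 * with the given device ID; eth_axgbe_dev_init() below uses it to detect
 * the RV root complex (AMD_PCI_RV_ROOT_COMPLEX_ID) and pick the matching
 * PCS window registers.
 */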

/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &axgbe_recv_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

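	/*
	 * Illustrative arithmetic (the SIZE value of 5 here is an assumed
	 * example, not a hardware guarantee): SIZE = 5 yields a window of
	 * 1 << (5 + 7) = 4096 bytes and a mask of 0xfff; the window base
	 * comes from OFFSET << 6.
	 */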
	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to store MAC addr tbl",
			     RTE_ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}
1256