/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int  axgbe_dev_configure(struct rte_eth_dev *dev);
static int  axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats,
				unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
			   const uint64_t *ids,
			   uint64_t *values,
			   unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int  axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var)                           \
	{ _string,                                              \
	  offsetof(struct axgbe_mmc_stats, _var),       \
	}

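/*
 * Each table entry pairs an xstat name with the byte offset of the
 * matching counter inside struct axgbe_mmc_stats. As a sketch, the
 * first entry below expands to:
 *
 *	{ "tx_bytes", offsetof(struct axgbe_mmc_stats, txoctetcount_gb) }
 *
 * axgbe_dev_xstats_get() later recovers each value by adding the
 * stored offset to &pdata->mmc_stats.
 */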
static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT        ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID       0x1022
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.link_update          = axgbe_dev_link_update,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.xstats_get	      = axgbe_dev_xstats_get,
	.xstats_reset	      = axgbe_dev_xstats_reset,
	.xstats_get_names     = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by the NIC for handling
 * a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure the device. Only the Rx checksum offload request is
 * latched here; link setup happens in dev_start via the PHY layer.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

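/*
 * Counters wider than 32 bits are exposed as _LO/_HI register pairs;
 * each accumulation below adds the low word first and then the high
 * word shifted left by 32. The counters are frozen via the MMC_CR MCF
 * bit for the duration of the read so the two halves stay consistent.
 */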
static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}

static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}

static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

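	/*
	 * ids was supplied: recurse through the ids == NULL path above to
	 * snapshot the full counter set, then copy out only the requested
	 * entries.
	 */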
	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}
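
/*
 * Application-side usage sketch (plain rte_ethdev xstats API, not part
 * of this driver): query the required array size first, then fetch the
 * names and values reported by the callbacks above.
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *	struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, vals, n);
 */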

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}

static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
		stats->ierrors += rxq->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->rx_mbuf_alloc_failed = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM  |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM  |
		DEV_TX_OFFLOAD_UDP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = AXGMAC_GET_BITS(mac_hfr1,
						 MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
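	/* e.g. an encoded size of 9 becomes 1 << (9 + 7) = 65536 bytes */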
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				     pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				     pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				     pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				  pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &axgbe_recv_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
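	/* e.g. SIZE = 7 gives a 1 << (7 + 7) = 16 KB window with mask 0x3fff */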
	pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
	pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
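	/*
	 * e.g. mac_lo = 0xddccbbaa and mac_hi = 0x0000ffee unpack to the
	 * station address aa:bb:cc:dd:ee:ff.
	 */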

	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to store MAC addr tbl",
			     RTE_ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}