xref: /dpdk/drivers/net/axgbe/axgbe_ethdev.c (revision 410cf0870c48e67483bfccbdd4c734097c01d2c4)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr,
				  uint32_t index,
				  uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
				       struct rte_ether_addr *mac_addr,
				       uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
					   uint8_t add);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
			      struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats,
				unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
			   const uint64_t *ids,
			   uint64_t *values,
			   unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
					struct rte_eth_pfc_conf *pfc_conf);
static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo);
static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo);
const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var)				\
	{ _string,						\
	  offsetof(struct axgbe_mmc_stats, _var),		\
	}

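/* Map each exposed xstat name to the offset of its backing counter in
 * struct axgbe_mmc_stats.
 */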
static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT        ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID       0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set         = axgbe_dev_mac_addr_set,
	.mac_addr_add         = axgbe_dev_mac_addr_add,
	.mac_addr_remove      = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list     = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set    = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update          = axgbe_dev_link_update,
	.get_reg              = axgbe_dev_get_regs,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.xstats_get           = axgbe_dev_xstats_get,
	.xstats_reset         = axgbe_dev_xstats_reset,
	.xstats_get_names     = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
	.flow_ctrl_get        = axgbe_flow_ctrl_get,
	.flow_ctrl_set        = axgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
	.rxq_info_get         = axgbe_rxq_info_get,
	.txq_info_get         = axgbe_txq_info_get,
	.dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by the NIC for handling a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since they are disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure the device. Only the requested Rx checksum offload state is
 * recorded here; it returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

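/* Record the requested Rx multi-queue mode; only RSS or no multi-queue
 * (ETH_MQ_RX_NONE) is supported.
 */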
static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Set Default MAC Addr */
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		       uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}

static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}

static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1; /* 0 is always default mac */
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid number of multicast addresses %d\n",
			    nb_mc_addr);
		return -EINVAL;
	}

	/* clear unicast addresses */
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}

static int
axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);

	if (pdata->uc_hash_mac_addr > 0) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

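/* Set or clear every MAC hash table register, then enable or disable hash
 * filtering in the packet filter register to match.
 */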
static int
axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	for (index = 0; index < pdata->hash_table_count; index++) {
		if (add)
			pdata->uc_hash_table[index] = ~0;
		else
			pdata->uc_hash_table[index] = 0;

		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
			    add ? "set" : "clear", index);

		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
			       pdata->uc_hash_table[index]);
	}

	if (add) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}
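
/* Read the hardware MMC counters and accumulate them into pdata->mmc_stats.
 * Counters wider than 32 bits are exposed as LO/HI register pairs and are
 * read in two pieces; the counters are frozen while reading so the snapshot
 * stays consistent.
 */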
static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}

static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}

static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

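	/* ids supplied: take a full snapshot first, then copy out the
	 * requested counters.
	 */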
	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}

static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
		stats->ierrors += rxq->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->rx_mbuf_alloc_failed = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}

static int
axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;
	reg_val = AXGMAC_IOREAD(pdata, reg);
	fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
	fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
	fc.autoneg = pdata->pause_autoneg;

	if (pdata->rx_pause && pdata->tx_pause)
		fc.mode = RTE_FC_FULL;
	else if (pdata->rx_pause)
		fc.mode = RTE_FC_RX_PAUSE;
	else if (pdata->tx_pause)
		fc.mode = RTE_FC_TX_PAUSE;
	else
		fc.mode = RTE_FC_NONE;

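	/* Convert the RFA/RFD register encoding back to kilobytes; this is
	 * the inverse of the AXGMAC_FLOW_CONTROL_VALUE() conversion used on
	 * the set path.
	 */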
	fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
	fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
	fc_conf->pause_time = fc.pause_time[0];
	fc_conf->send_xon = fc.send_xon;
	fc_conf->mode = fc.mode;

	return 0;
}

static int
axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	reg = MAC_Q0TFCR;

	pdata->pause_autoneg = fc_conf->autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = fc_conf->send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
	AXGMAC_IOWRITE(pdata, reg, reg_val);
	fc.mode = fc_conf->mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static int
axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	uint8_t tc_num;

	if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
		PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n",
			     pdata->hw_feat.tc_cnt);
		return -EINVAL;
	}

	tc_num = pdata->pfc_map[pfc_conf->priority];

	pdata->pause_autoneg = pfc_conf->fc.autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = pfc_conf->fc.send_xon;
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
		AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));

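	/* Program the pause time for this traffic class; TCs 0-3 live in
	 * MTL_TCPM0R and TCs 4-7 in MTL_TCPM1R.
	 */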
	switch (tc_num) {
	case 0:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC0, pfc_conf->fc.pause_time);
		break;
	case 1:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC1, pfc_conf->fc.pause_time);
		break;
	case 2:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC2, pfc_conf->fc.pause_time);
		break;
	case 3:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
				PSTC3, pfc_conf->fc.pause_time);
		break;
	case 4:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC4, pfc_conf->fc.pause_time);
		break;
	case 5:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC5, pfc_conf->fc.pause_time);
		break;
	case 6:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC6, pfc_conf->fc.pause_time);
		break;
	case 7:
		AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
				PSTC7, pfc_conf->fc.pause_time);
		break;
	}

	fc.mode = pfc_conf->fc.mode;

	if (fc.mode == RTE_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
	} else if (fc.mode == RTE_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
		AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
	}

	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}

static void
axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct axgbe_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];
	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_desc;
	qinfo->conf.rx_free_thresh = rxq->free_thresh;
}

static void
axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct axgbe_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];
	qinfo->nb_desc = txq->nb_desc;
	qinfo->conf.tx_free_thresh = txq->free_thresh;
}

const uint32_t *
axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_ICMP,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

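	/* Packet type parsing is only advertised for the default
	 * (non-scattered) Rx function.
	 */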
	if (dev->rx_pkt_burst == axgbe_recv_pkts)
		return ptypes;
	return NULL;
}

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					       ADDMACADRSEL);
	hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						 RXFIFOSIZE);
	hw_feat->tx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						 TXFIFOSIZE);
	hw_feat->adv_ts_hi     = AXGMAC_GET_BITS(mac_hfr1,
						 MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

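/* Return true when a PCI device with the given device ID is present on
 * the PCI bus.
 */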
static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}

/*
 * Initialise an axgbe port as an ethdev. It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* Version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
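	/* The window offset is kept in 64-byte units and the window size
	 * field encodes a power of two starting at 128 bytes.
	 */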
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;

	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to "
			     "store MAC addresses", len);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);

	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}