xref: /dpdk/drivers/net/atlantic/atl_ethdev.c (revision 25d11a86c56d50947af33d0b79ede622809bd8b9)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_ethdev_pci.h>
6 
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
10 #include "atl_logs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
14 
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
17 
18 static int  atl_dev_configure(struct rte_eth_dev *dev);
19 static int  atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int  atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
30 
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 				    struct rte_eth_xstat_name *xstats_names,
33 				    unsigned int size);
34 
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 				struct rte_eth_stats *stats);
37 
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 			      struct rte_eth_xstat *stats, unsigned int n);
40 
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
42 
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
44 			      size_t fw_size);
45 
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 			       struct rte_eth_dev_info *dev_info);
48 
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
50 
51 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
52 
53 /* VLAN stuff */
54 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
55 		uint16_t vlan_id, int on);
56 
57 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
58 
59 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
60 				     uint16_t queue_id, int on);
61 
62 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
63 			     enum rte_vlan_type vlan_type, uint16_t tpid);
64 
65 /* EEPROM */
66 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
67 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
68 			      struct rte_dev_eeprom_info *eeprom);
69 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
70 			      struct rte_dev_eeprom_info *eeprom);
71 
72 /* Regs */
73 static int atl_dev_get_regs(struct rte_eth_dev *dev,
74 			    struct rte_dev_reg_info *regs);
75 
76 /* Flow control */
77 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
78 			       struct rte_eth_fc_conf *fc_conf);
79 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
80 			       struct rte_eth_fc_conf *fc_conf);
81 
82 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
83 
84 /* Interrupts */
85 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
86 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
87 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
88 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
89 				    struct rte_intr_handle *handle);
90 static void atl_dev_interrupt_handler(void *param);
91 
92 
93 static int atl_add_mac_addr(struct rte_eth_dev *dev,
94 			    struct ether_addr *mac_addr,
95 			    uint32_t index, uint32_t pool);
96 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
97 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
98 					   struct ether_addr *mac_addr);
99 
100 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
101 				    struct ether_addr *mc_addr_set,
102 				    uint32_t nb_mc_addr);
103 
104 /* RSS */
105 static int atl_reta_update(struct rte_eth_dev *dev,
106 			     struct rte_eth_rss_reta_entry64 *reta_conf,
107 			     uint16_t reta_size);
108 static int atl_reta_query(struct rte_eth_dev *dev,
109 			    struct rte_eth_rss_reta_entry64 *reta_conf,
110 			    uint16_t reta_size);
111 static int atl_rss_hash_update(struct rte_eth_dev *dev,
112 				 struct rte_eth_rss_conf *rss_conf);
113 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
114 				   struct rte_eth_rss_conf *rss_conf);
115 
116 
117 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
118 	struct rte_pci_device *pci_dev);
119 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
120 
121 static void atl_dev_info_get(struct rte_eth_dev *dev,
122 				struct rte_eth_dev_info *dev_info);
123 
124 int atl_logtype_init;
125 int atl_logtype_driver;
126 
127 /*
128  * The set of PCI devices this driver supports
129  */
130 static const struct rte_pci_id pci_id_atl_map[] = {
131 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
132 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
133 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
134 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
135 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
136 
137 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
138 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
139 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
140 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
141 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
142 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
143 
144 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
145 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
146 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
147 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
148 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
149 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
150 
151 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
152 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
153 	{ .vendor_id = 0, /* sentinel */ },
154 };
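/*
 * Each entry above matches on PCI vendor/device ID only: RTE_PCI_DEVICE()
 * leaves the subsystem IDs as wildcards, so a single line covers every
 * board built around a given controller.  The zeroed .vendor_id entry is
 * the sentinel that terminates the table for the PCI bus scan.
 */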
155 
156 static struct rte_pci_driver rte_atl_pmd = {
157 	.id_table = pci_id_atl_map,
158 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
159 		     RTE_PCI_DRV_IOVA_AS_VA,
160 	.probe = eth_atl_pci_probe,
161 	.remove = eth_atl_pci_remove,
162 };
163 
164 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
165 			| DEV_RX_OFFLOAD_IPV4_CKSUM \
166 			| DEV_RX_OFFLOAD_UDP_CKSUM \
167 			| DEV_RX_OFFLOAD_TCP_CKSUM \
168 			| DEV_RX_OFFLOAD_JUMBO_FRAME)
169 
170 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
171 			| DEV_TX_OFFLOAD_IPV4_CKSUM \
172 			| DEV_TX_OFFLOAD_UDP_CKSUM \
173 			| DEV_TX_OFFLOAD_TCP_CKSUM \
174 			| DEV_TX_OFFLOAD_TCP_TSO \
175 			| DEV_TX_OFFLOAD_MULTI_SEGS)
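/*
 * ATL_RX_OFFLOADS/ATL_TX_OFFLOADS are the capability masks reported by
 * atl_dev_info_get() below; the ethdev layer validates requested offloads
 * against them at configure time.  A minimal application-side sketch
 * (illustrative only, not driver code):
 *
 *	struct rte_eth_conf conf = { 0 };
 *	conf.rxmode.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
 *			       DEV_RX_OFFLOAD_TCP_CKSUM;
 *	conf.txmode.offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */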
176 
177 static const struct rte_eth_desc_lim rx_desc_lim = {
178 	.nb_max = ATL_MAX_RING_DESC,
179 	.nb_min = ATL_MIN_RING_DESC,
180 	.nb_align = ATL_RXD_ALIGN,
181 };
182 
183 static const struct rte_eth_desc_lim tx_desc_lim = {
184 	.nb_max = ATL_MAX_RING_DESC,
185 	.nb_min = ATL_MIN_RING_DESC,
186 	.nb_align = ATL_TXD_ALIGN,
187 	.nb_seg_max = ATL_TX_MAX_SEG,
188 	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
189 };
190 
191 #define ATL_XSTATS_FIELD(name) { \
192 	#name, \
193 	offsetof(struct aq_stats_s, name) \
194 }
195 
196 struct atl_xstats_tbl_s {
197 	const char *name;
198 	unsigned int offset;
199 };
200 
201 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
202 	ATL_XSTATS_FIELD(uprc),
203 	ATL_XSTATS_FIELD(mprc),
204 	ATL_XSTATS_FIELD(bprc),
205 	ATL_XSTATS_FIELD(erpt),
206 	ATL_XSTATS_FIELD(uptc),
207 	ATL_XSTATS_FIELD(mptc),
208 	ATL_XSTATS_FIELD(bptc),
209 	ATL_XSTATS_FIELD(erpr),
210 	ATL_XSTATS_FIELD(ubrc),
211 	ATL_XSTATS_FIELD(ubtc),
212 	ATL_XSTATS_FIELD(mbrc),
213 	ATL_XSTATS_FIELD(mbtc),
214 	ATL_XSTATS_FIELD(bbrc),
215 	ATL_XSTATS_FIELD(bbtc),
216 };
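/*
 * Each ATL_XSTATS_FIELD() entry pairs an extended-statistics name with the
 * byte offset of the matching counter inside struct aq_stats_s, so
 * atl_dev_xstats_get() can read any counter generically:
 *
 *	value = *(u64 *)((uint8_t *)&hw->curr_stats +
 *			 atl_xstats_tbl[i].offset);
 *
 * Applications retrieve these counters through rte_eth_xstats_get_names()
 * and rte_eth_xstats_get().
 */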
217 
218 static const struct eth_dev_ops atl_eth_dev_ops = {
219 	.dev_configure	      = atl_dev_configure,
220 	.dev_start	      = atl_dev_start,
221 	.dev_stop	      = atl_dev_stop,
222 	.dev_set_link_up      = atl_dev_set_link_up,
223 	.dev_set_link_down    = atl_dev_set_link_down,
224 	.dev_close	      = atl_dev_close,
225 	.dev_reset	      = atl_dev_reset,
226 
227 	/* PROMISC */
228 	.promiscuous_enable   = atl_dev_promiscuous_enable,
229 	.promiscuous_disable  = atl_dev_promiscuous_disable,
230 	.allmulticast_enable  = atl_dev_allmulticast_enable,
231 	.allmulticast_disable = atl_dev_allmulticast_disable,
232 
233 	/* Link */
234 	.link_update	      = atl_dev_link_update,
235 
236 	.get_reg              = atl_dev_get_regs,
237 
238 	/* Stats */
239 	.stats_get	      = atl_dev_stats_get,
240 	.xstats_get	      = atl_dev_xstats_get,
241 	.xstats_get_names     = atl_dev_xstats_get_names,
242 	.stats_reset	      = atl_dev_stats_reset,
243 	.xstats_reset	      = atl_dev_stats_reset,
244 
245 	.fw_version_get       = atl_fw_version_get,
246 	.dev_infos_get	      = atl_dev_info_get,
247 	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
248 
249 	.mtu_set              = atl_dev_mtu_set,
250 
251 	/* VLAN */
252 	.vlan_filter_set      = atl_vlan_filter_set,
253 	.vlan_offload_set     = atl_vlan_offload_set,
254 	.vlan_tpid_set        = atl_vlan_tpid_set,
255 	.vlan_strip_queue_set = atl_vlan_strip_queue_set,
256 
257 	/* Queue Control */
258 	.rx_queue_start	      = atl_rx_queue_start,
259 	.rx_queue_stop	      = atl_rx_queue_stop,
260 	.rx_queue_setup       = atl_rx_queue_setup,
261 	.rx_queue_release     = atl_rx_queue_release,
262 
263 	.tx_queue_start	      = atl_tx_queue_start,
264 	.tx_queue_stop	      = atl_tx_queue_stop,
265 	.tx_queue_setup       = atl_tx_queue_setup,
266 	.tx_queue_release     = atl_tx_queue_release,
267 
268 	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
269 	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
270 
271 	.rx_queue_count       = atl_rx_queue_count,
272 	.rx_descriptor_status = atl_dev_rx_descriptor_status,
273 	.tx_descriptor_status = atl_dev_tx_descriptor_status,
274 
275 	/* EEPROM */
276 	.get_eeprom_length    = atl_dev_get_eeprom_length,
277 	.get_eeprom           = atl_dev_get_eeprom,
278 	.set_eeprom           = atl_dev_set_eeprom,
279 
280 	/* Flow Control */
281 	.flow_ctrl_get	      = atl_flow_ctrl_get,
282 	.flow_ctrl_set	      = atl_flow_ctrl_set,
283 
284 	/* MAC */
285 	.mac_addr_add	      = atl_add_mac_addr,
286 	.mac_addr_remove      = atl_remove_mac_addr,
287 	.mac_addr_set	      = atl_set_default_mac_addr,
288 	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
289 	.rxq_info_get	      = atl_rxq_info_get,
290 	.txq_info_get	      = atl_txq_info_get,
291 
292 	.reta_update          = atl_reta_update,
293 	.reta_query           = atl_reta_query,
294 	.rss_hash_update      = atl_rss_hash_update,
295 	.rss_hash_conf_get    = atl_rss_hash_conf_get,
296 };
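/*
 * atl_eth_dev_ops is the dispatch table the generic ethdev layer uses for
 * this PMD: rte_eth_dev_configure() lands in atl_dev_configure(),
 * rte_eth_dev_start() in atl_dev_start(), and so on.  The queue and Rx/Tx
 * info callbacks referenced here but not defined in this file
 * (atl_rx_queue_setup(), atl_rxq_info_get(), ...) live in the companion
 * Rx/Tx source of this driver.
 */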
297 
298 static inline int32_t
299 atl_reset_hw(struct aq_hw_s *hw)
300 {
301 	return hw_atl_b0_hw_reset(hw);
302 }
303 
304 static inline void
305 atl_enable_intr(struct rte_eth_dev *dev)
306 {
307 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
308 
309 	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
310 }
311 
312 static void
313 atl_disable_intr(struct aq_hw_s *hw)
314 {
315 	PMD_INIT_FUNC_TRACE();
316 	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
317 }
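/*
 * The two helpers above gate all interrupt (ITR) lines at once: writing
 * 0xffffffff to the mask-set LSW register unmasks every cause, and writing
 * the same value to the mask-clear LSW register masks them again.  They are
 * used to keep interrupts off while the hardware is being (re)configured.
 */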
318 
319 static int
320 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
321 {
322 	struct atl_adapter *adapter =
323 		(struct atl_adapter *)eth_dev->data->dev_private;
324 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
325 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
326 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
327 	int err = 0;
328 
329 	PMD_INIT_FUNC_TRACE();
330 
331 	eth_dev->dev_ops = &atl_eth_dev_ops;
332 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
333 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
334 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
335 
336 	/* For secondary processes, the primary process has done all the work */
337 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
338 		return 0;
339 
340 	/* Vendor and Device ID need to be set before init of shared code */
341 	hw->device_id = pci_dev->id.device_id;
342 	hw->vendor_id = pci_dev->id.vendor_id;
343 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
344 
345 	/* Hardware configuration - hardcoded defaults */
346 	adapter->hw_cfg.is_lro = false;
347 	adapter->hw_cfg.wol = false;
348 	adapter->hw_cfg.is_rss = false;
349 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
350 
351 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
352 			  AQ_NIC_RATE_5G |
353 			  AQ_NIC_RATE_2G5 |
354 			  AQ_NIC_RATE_1G |
355 			  AQ_NIC_RATE_100M;
356 
357 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
358 	adapter->hw_cfg.aq_rss.indirection_table_size =
359 		HW_ATL_B0_RSS_REDIRECTION_MAX;
360 
361 	hw->aq_nic_cfg = &adapter->hw_cfg;
362 
363 	/* disable interrupt */
364 	atl_disable_intr(hw);
365 
366 	/* Allocate memory for storing MAC addresses */
367 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
368 	if (eth_dev->data->mac_addrs == NULL) {
369 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
370 		return -ENOMEM;
371 	}
372 
373 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
374 	if (err)
375 		return err;
376 
377 	/* Copy the permanent MAC address */
378 	if (hw->aq_fw_ops->get_mac_permanent(hw,
379 			eth_dev->data->mac_addrs->addr_bytes) != 0)
380 		return -EINVAL;
381 
382 	/* Reset the hw statistics */
383 	atl_dev_stats_reset(eth_dev);
384 
385 	rte_intr_callback_register(intr_handle,
386 				   atl_dev_interrupt_handler, eth_dev);
387 
388 	/* enable uio/vfio intr/eventfd mapping */
389 	rte_intr_enable(intr_handle);
390 
391 	/* enable supported interrupts */
392 	atl_enable_intr(eth_dev);
393 
394 	return err;
395 }
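/*
 * Probe path summary: eth_atl_pci_probe() below has
 * rte_eth_dev_pci_generic_probe() allocate a dev_private area of
 * sizeof(struct atl_adapter) and then call eth_atl_dev_init(), which wires
 * up the burst functions, applies the hardcoded hw_cfg defaults, loads the
 * firmware ops, reads the permanent MAC address and registers the interrupt
 * handler.  A minimal user of the resulting port (illustrative sketch, not
 * driver code) would then do:
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */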
396 
397 static int
398 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
399 {
400 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
401 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
402 	struct aq_hw_s *hw;
403 
404 	PMD_INIT_FUNC_TRACE();
405 
406 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
407 		return -EPERM;
408 
409 	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
410 
411 	if (hw->adapter_stopped == 0)
412 		atl_dev_close(eth_dev);
413 
414 	eth_dev->dev_ops = NULL;
415 	eth_dev->rx_pkt_burst = NULL;
416 	eth_dev->tx_pkt_burst = NULL;
417 
418 	/* disable uio intr before callback unregister */
419 	rte_intr_disable(intr_handle);
420 	rte_intr_callback_unregister(intr_handle,
421 				     atl_dev_interrupt_handler, eth_dev);
422 
423 	rte_free(eth_dev->data->mac_addrs);
424 	eth_dev->data->mac_addrs = NULL;
425 
426 	return 0;
427 }
428 
429 static int
430 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
431 	struct rte_pci_device *pci_dev)
432 {
433 	return rte_eth_dev_pci_generic_probe(pci_dev,
434 		sizeof(struct atl_adapter), eth_atl_dev_init);
435 }
436 
437 static int
438 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
439 {
440 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
441 }
442 
443 static int
444 atl_dev_configure(struct rte_eth_dev *dev)
445 {
446 	struct atl_interrupt *intr =
447 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
448 
449 	PMD_INIT_FUNC_TRACE();
450 
451 	/* set flag to update link status after init */
452 	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
453 
454 	return 0;
455 }
456 
457 /*
458  * Configure device link speed and setup link.
459  * It returns 0 on success.
460  */
461 static int
462 atl_dev_start(struct rte_eth_dev *dev)
463 {
464 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
465 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
466 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
467 	uint32_t intr_vector = 0;
468 	uint32_t *link_speeds;
469 	uint32_t speed = 0;
470 	int status;
471 	int err;
472 
473 	PMD_INIT_FUNC_TRACE();
474 
475 	/* set adapter started */
476 	hw->adapter_stopped = 0;
477 
478 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
479 		PMD_INIT_LOG(ERR,
480 		"Invalid link_speeds for port %u, fix speed not supported",
481 				dev->data->port_id);
482 		return -EINVAL;
483 	}
484 
485 	/* disable uio/vfio intr/eventfd mapping */
486 	rte_intr_disable(intr_handle);
487 
488 	/* reinitialize adapter
489 	 * this calls reset and start
490 	 */
491 	status = atl_reset_hw(hw);
492 	if (status != 0)
493 		return -EIO;
494 
495 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
496 
497 	hw_atl_b0_hw_start(hw);
498 	/* check and configure queue intr-vector mapping */
499 	if ((rte_intr_cap_multiple(intr_handle) ||
500 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
501 	    dev->data->dev_conf.intr_conf.rxq != 0) {
502 		intr_vector = dev->data->nb_rx_queues;
503 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
504 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
505 					ATL_MAX_INTR_QUEUE_NUM);
506 			return -ENOTSUP;
507 		}
508 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
509 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
510 			return -1;
511 		}
512 	}
513 
514 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
515 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
516 				    dev->data->nb_rx_queues * sizeof(int), 0);
517 		if (intr_handle->intr_vec == NULL) {
518 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
519 				     " intr_vec", dev->data->nb_rx_queues);
520 			return -ENOMEM;
521 		}
522 	}
523 
524 	/* initialize transmission unit */
525 	atl_tx_init(dev);
526 
527 	/* This can fail when allocating mbufs for descriptor rings */
528 	err = atl_rx_init(dev);
529 	if (err) {
530 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
531 		goto error;
532 	}
533 
534 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
535 		hw->fw_ver_actual >> 24,
536 		(hw->fw_ver_actual >> 16) & 0xFF,
537 		hw->fw_ver_actual & 0xFFFF);
538 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
539 
540 	err = atl_start_queues(dev);
541 	if (err < 0) {
542 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
543 		goto error;
544 	}
545 
546 	err = hw->aq_fw_ops->update_link_status(hw);
547 
548 	if (err)
549 		goto error;
550 
551 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
552 
553 	link_speeds = &dev->data->dev_conf.link_speeds;
554 
555 	speed = 0x0;
556 
557 	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
558 		speed = hw->aq_nic_cfg->link_speed_msk;
559 	} else {
560 		if (*link_speeds & ETH_LINK_SPEED_10G)
561 			speed |= AQ_NIC_RATE_10G;
562 		if (*link_speeds & ETH_LINK_SPEED_5G)
563 			speed |= AQ_NIC_RATE_5G;
564 		if (*link_speeds & ETH_LINK_SPEED_1G)
565 			speed |= AQ_NIC_RATE_1G;
566 		if (*link_speeds & ETH_LINK_SPEED_2_5G)
567 			speed |=  AQ_NIC_RATE_2G5;
568 		if (*link_speeds & ETH_LINK_SPEED_100M)
569 			speed |= AQ_NIC_RATE_100M;
570 	}
571 
572 	err = hw->aq_fw_ops->set_link_speed(hw, speed);
573 	if (err)
574 		goto error;
575 
576 	if (rte_intr_allow_others(intr_handle)) {
577 		/* check if lsc interrupt is enabled */
578 		if (dev->data->dev_conf.intr_conf.lsc != 0)
579 			atl_dev_lsc_interrupt_setup(dev, true);
580 		else
581 			atl_dev_lsc_interrupt_setup(dev, false);
582 	} else {
583 		rte_intr_callback_unregister(intr_handle,
584 					     atl_dev_interrupt_handler, dev);
585 		if (dev->data->dev_conf.intr_conf.lsc != 0)
586 			PMD_INIT_LOG(INFO, "LSC interrupt won't be enabled:"
587 				     " no interrupt multiplexing support");
588 	}
589 
590 	/* check if rxq interrupt is enabled */
591 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
592 	    rte_intr_dp_is_en(intr_handle))
593 		atl_dev_rxq_interrupt_setup(dev);
594 
595 	/* enable uio/vfio intr/eventfd mapping */
596 	rte_intr_enable(intr_handle);
597 
598 	/* re-enable interrupts after the HW reset */
599 	atl_enable_intr(dev);
600 
601 	return 0;
602 
603 error:
604 	atl_stop_queues(dev);
605 	return -EIO;
606 }
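/*
 * Start sequence above: HW reset + init + start, optional per-queue Rx
 * interrupt vectors, Tx/Rx ring init, queue start, then the configured
 * link_speeds are translated into the firmware AQ_NIC_RATE_* mask.
 * ETH_LINK_SPEED_AUTONEG (0) selects the full link_speed_msk; an
 * application restricting the port to 10G/5G (illustrative only) would set
 *
 *	conf.link_speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_5G;
 *
 * before rte_eth_dev_configure(), while ETH_LINK_SPEED_FIXED is rejected
 * at the top of this function.
 */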
607 
608 /*
609  * Stop device: disable rx and tx functions to allow for reconfiguring.
610  */
611 static void
612 atl_dev_stop(struct rte_eth_dev *dev)
613 {
614 	struct rte_eth_link link;
615 	struct aq_hw_s *hw =
616 		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
617 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
618 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
619 
620 	PMD_INIT_FUNC_TRACE();
621 
622 	/* disable interrupts */
623 	atl_disable_intr(hw);
624 
625 	/* reset the NIC */
626 	atl_reset_hw(hw);
627 	hw->adapter_stopped = 1;
628 
629 	atl_stop_queues(dev);
630 
631 	/* Clear stored conf */
632 	dev->data->scattered_rx = 0;
633 	dev->data->lro = 0;
634 
635 	/* Clear recorded link status */
636 	memset(&link, 0, sizeof(link));
637 	rte_eth_linkstatus_set(dev, &link);
638 
639 	if (!rte_intr_allow_others(intr_handle))
640 		/* resume to the default handler */
641 		rte_intr_callback_register(intr_handle,
642 					   atl_dev_interrupt_handler,
643 					   (void *)dev);
644 
645 	/* Clean datapath event and queue/vec mapping */
646 	rte_intr_efd_disable(intr_handle);
647 	if (intr_handle->intr_vec != NULL) {
648 		rte_free(intr_handle->intr_vec);
649 		intr_handle->intr_vec = NULL;
650 	}
651 }
652 
653 /*
654  * Set device link up: enable tx.
655  */
656 static int
657 atl_dev_set_link_up(struct rte_eth_dev *dev)
658 {
659 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
660 
661 	return hw->aq_fw_ops->set_link_speed(hw,
662 			hw->aq_nic_cfg->link_speed_msk);
663 }
664 
665 /*
666  * Set device link down: disable tx.
667  */
668 static int
669 atl_dev_set_link_down(struct rte_eth_dev *dev)
670 {
671 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
672 
673 	return hw->aq_fw_ops->set_link_speed(hw, 0);
674 }
675 
676 /*
677  * Reset and stop device.
678  */
679 static void
680 atl_dev_close(struct rte_eth_dev *dev)
681 {
682 	PMD_INIT_FUNC_TRACE();
683 
684 	atl_dev_stop(dev);
685 
686 	atl_free_queues(dev);
687 }
688 
689 static int
690 atl_dev_reset(struct rte_eth_dev *dev)
691 {
692 	int ret;
693 
694 	ret = eth_atl_dev_uninit(dev);
695 	if (ret)
696 		return ret;
697 
698 	ret = eth_atl_dev_init(dev);
699 
700 	return ret;
701 }
702 
703 
704 static int
705 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
706 {
707 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
708 	struct aq_hw_s *hw = &adapter->hw;
709 	struct atl_sw_stats *swstats = &adapter->sw_stats;
710 	unsigned int i;
711 
712 	hw->aq_fw_ops->update_stats(hw);
713 
714 	/* Fill out the rte_eth_stats statistics structure */
715 	stats->ipackets = hw->curr_stats.dma_pkt_rc;
716 	stats->ibytes = hw->curr_stats.dma_oct_rc;
717 	stats->imissed = hw->curr_stats.dpc;
718 	stats->ierrors = hw->curr_stats.erpt;
719 
720 	stats->opackets = hw->curr_stats.dma_pkt_tc;
721 	stats->obytes = hw->curr_stats.dma_oct_tc;
722 	stats->oerrors = 0;
723 
724 	stats->rx_nombuf = swstats->rx_nombuf;
725 
726 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
727 		stats->q_ipackets[i] = swstats->q_ipackets[i];
728 		stats->q_opackets[i] = swstats->q_opackets[i];
729 		stats->q_ibytes[i] = swstats->q_ibytes[i];
730 		stats->q_obytes[i] = swstats->q_obytes[i];
731 		stats->q_errors[i] = swstats->q_errors[i];
732 	}
733 	return 0;
734 }
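/*
 * Basic stats come from two sources: the packet/byte/error totals are DMA
 * counters kept by the firmware in hw->curr_stats (refreshed by
 * update_stats() above), while rx_nombuf and the per-queue counters are
 * software counters accumulated by the Rx/Tx burst routines into
 * adapter->sw_stats.  Only the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues can
 * be reported, as dictated by the rte_eth_stats layout.
 */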
735 
736 static void
737 atl_dev_stats_reset(struct rte_eth_dev *dev)
738 {
739 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
740 	struct aq_hw_s *hw = &adapter->hw;
741 
742 	hw->aq_fw_ops->update_stats(hw);
743 
744 	/* Reset software totals */
745 	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
746 
747 	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
748 }
749 
750 static int
751 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
752 			 struct rte_eth_xstat_name *xstats_names,
753 			 unsigned int size)
754 {
755 	unsigned int i;
756 
757 	if (!xstats_names)
758 		return RTE_DIM(atl_xstats_tbl);
759 
760 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
761 		snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
762 			atl_xstats_tbl[i].name);
763 
764 	return size;
765 }
766 
767 static int
768 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
769 		   unsigned int n)
770 {
771 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
772 	struct aq_hw_s *hw = &adapter->hw;
773 	unsigned int i;
774 
775 	if (!stats)
776 		return 0;
777 
778 	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
779 		stats[i].id = i;
780 		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
781 					atl_xstats_tbl[i].offset);
782 	}
783 
784 	return n;
785 }
786 
787 static int
788 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
789 {
790 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
791 	uint32_t fw_ver = 0;
792 	unsigned int ret = 0;
793 
794 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
795 	if (ret)
796 		return -EIO;
797 
798 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
799 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
800 
801 	ret += 1; /* account for the terminating null byte */
802 
803 	if (fw_size < ret)
804 		return ret;
805 
806 	return 0;
807 }
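/*
 * Per the ethdev convention, atl_fw_version_get() returns 0 when the
 * caller's buffer was large enough, otherwise the number of bytes
 * (including the terminating '\0') required to hold the full string.
 * Caller-side usage (illustrative only):
 *
 *	char fw[32];
 *	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *		printf("FW %s\n", fw);
 */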
808 
809 static void
810 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
811 {
812 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
813 
814 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
815 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
816 
817 	dev_info->min_rx_bufsize = 1024;
818 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
819 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
820 	dev_info->max_vfs = pci_dev->max_vfs;
821 
822 	dev_info->max_hash_mac_addrs = 0;
823 	dev_info->max_vmdq_pools = 0;
824 	dev_info->vmdq_queue_num = 0;
825 
826 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
827 
828 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
829 
830 
831 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
832 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
833 	};
834 
835 	dev_info->default_txconf = (struct rte_eth_txconf) {
836 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
837 	};
838 
839 	dev_info->rx_desc_lim = rx_desc_lim;
840 	dev_info->tx_desc_lim = tx_desc_lim;
841 
842 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
843 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
844 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
845 
846 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
847 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
848 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
849 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
850 }
851 
852 static const uint32_t *
853 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
854 {
855 	static const uint32_t ptypes[] = {
856 		RTE_PTYPE_L2_ETHER,
857 		RTE_PTYPE_L2_ETHER_ARP,
858 		RTE_PTYPE_L2_ETHER_VLAN,
859 		RTE_PTYPE_L3_IPV4,
860 		RTE_PTYPE_L3_IPV6,
861 		RTE_PTYPE_L4_TCP,
862 		RTE_PTYPE_L4_UDP,
863 		RTE_PTYPE_L4_SCTP,
864 		RTE_PTYPE_L4_ICMP,
865 		RTE_PTYPE_UNKNOWN
866 	};
867 
868 	if (dev->rx_pkt_burst == atl_recv_pkts)
869 		return ptypes;
870 
871 	return NULL;
872 }
873 
874 /* return 0 means link status changed, -1 means not changed */
875 static int
876 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
877 {
878 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
879 	struct atl_interrupt *intr =
880 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
881 	struct rte_eth_link link, old;
882 	int err = 0;
883 
884 	link.link_status = ETH_LINK_DOWN;
885 	link.link_speed = 0;
886 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
887 	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
888 	memset(&old, 0, sizeof(old));
889 
890 	/* load old link status */
891 	rte_eth_linkstatus_get(dev, &old);
892 
893 	/* read current link status */
894 	err = hw->aq_fw_ops->update_link_status(hw);
895 
896 	if (err)
897 		return 0;
898 
899 	if (hw->aq_link_status.mbps == 0) {
900 		/* write default (down) link status */
901 		rte_eth_linkstatus_set(dev, &link);
902 		if (link.link_status == old.link_status)
903 			return -1;
904 		return 0;
905 	}
906 
907 	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
908 
909 	link.link_status = ETH_LINK_UP;
910 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
911 	link.link_speed = hw->aq_link_status.mbps;
912 
913 	rte_eth_linkstatus_set(dev, &link);
914 
915 	if (link.link_status == old.link_status)
916 		return -1;
917 
918 	return 0;
919 }
920 
921 static void
922 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
923 {
924 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
925 
926 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
927 }
928 
929 static void
930 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
931 {
932 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
933 
934 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
935 }
936 
937 static void
938 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
939 {
940 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
941 
942 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
943 }
944 
945 static void
946 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
947 {
948 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
949 
950 	if (dev->data->promiscuous == 1)
951 		return; /* must remain in all_multicast mode */
952 
953 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
954 }
955 
956 /**
957  * It clears the interrupt causes and enables the interrupt.
958  * It will be called only once during NIC initialization.
959  *
960  * @param dev
961  *  Pointer to struct rte_eth_dev.
962  * @param on
963  *  Enable or Disable.
964  *
965  * @return
966  *  - On success, zero.
967  *  - On failure, a negative value.
968  */
969 
970 static int
971 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
972 {
973 	atl_dev_link_status_print(dev);
974 	return 0;
975 }
976 
977 static int
978 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
979 {
980 	return 0;
981 }
982 
983 
984 static int
985 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
986 {
987 	struct atl_interrupt *intr =
988 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
989 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
990 	u64 cause = 0;
991 
992 	hw_atl_b0_hw_irq_read(hw, &cause);
993 
994 	atl_disable_intr(hw);
995 	intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
996 			ATL_FLAG_NEED_LINK_UPDATE : 0;
997 
998 	return 0;
999 }
1000 
1001 /**
1002  * It gets and then prints the link status.
1003  *
1004  * @param dev
1005  *  Pointer to struct rte_eth_dev.
1006  *
1007  * @return
1008  *  void
1010  */
1011 static void
1012 atl_dev_link_status_print(struct rte_eth_dev *dev)
1013 {
1014 	struct rte_eth_link link;
1015 
1016 	memset(&link, 0, sizeof(link));
1017 	rte_eth_linkstatus_get(dev, &link);
1018 	if (link.link_status) {
1019 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1020 					(int)(dev->data->port_id),
1021 					(unsigned int)link.link_speed,
1022 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1023 					"full-duplex" : "half-duplex");
1024 	} else {
1025 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
1026 				(int)(dev->data->port_id));
1027 	}
1028 
1029 
1030 #ifdef DEBUG
1031 {
1032 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1033 
1034 	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1035 				pci_dev->addr.domain,
1036 				pci_dev->addr.bus,
1037 				pci_dev->addr.devid,
1038 				pci_dev->addr.function);
1039 }
1040 #endif
1041 
1042 	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1043 }
1044 
1045 /*
1046  * It executes link_update after knowing an interrupt occurred.
1047  *
1048  * @param dev
1049  *  Pointer to struct rte_eth_dev.
1050  *
1051  * @return
1052  *  - On success, zero.
1053  *  - On failure, a negative value.
1054  */
1055 static int
1056 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1057 			   struct rte_intr_handle *intr_handle)
1058 {
1059 	struct atl_interrupt *intr =
1060 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1061 
1062 	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1063 		atl_dev_link_update(dev, 0);
1064 		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1065 		atl_dev_link_status_print(dev);
1066 		_rte_eth_dev_callback_process(dev,
1067 			RTE_ETH_EVENT_INTR_LSC, NULL);
1068 	}
1069 
1070 	atl_enable_intr(dev);
1071 	rte_intr_enable(intr_handle);
1072 
1073 	return 0;
1074 }
1075 
1076 /**
1077  * Interrupt handler triggered by the NIC for handling a
1078  * specific interrupt.
1079  *
1080  * @param handle
1081  *  Pointer to interrupt handle.
1082  * @param param
1083  *  The address of the parameter (struct rte_eth_dev *) registered before.
1084  *
1085  * @return
1086  *  void
1087  */
1088 static void
1089 atl_dev_interrupt_handler(void *param)
1090 {
1091 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1092 
1093 	atl_dev_interrupt_get_status(dev);
1094 	atl_dev_interrupt_action(dev, dev->intr_handle);
1095 }
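/*
 * LSC interrupt flow: the handler above is what gets registered with the
 * interrupt subsystem; atl_dev_interrupt_get_status() reads the cause bits
 * and masks further interrupts, then atl_dev_interrupt_action() refreshes
 * the link state, notifies RTE_ETH_EVENT_INTR_LSC callbacks and re-enables
 * the interrupt sources.
 */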
1096 
1097 #define SFP_EEPROM_SIZE 0xff
1098 
1099 static int
1100 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1101 {
1102 	return SFP_EEPROM_SIZE;
1103 }
1104 
1105 static int
1106 atl_dev_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1107 {
1108 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1109 
1110 	if (hw->aq_fw_ops->get_eeprom == NULL)
1111 		return -ENOTSUP;
1112 
1113 	if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1114 		return -EINVAL;
1115 
1116 	return hw->aq_fw_ops->get_eeprom(hw, eeprom->data, eeprom->length);
1117 }
1118 
1119 static int
1120 atl_dev_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1121 {
1122 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1123 
1124 	if (hw->aq_fw_ops->set_eeprom == NULL)
1125 		return -ENOTSUP;
1126 
1127 	if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1128 		return -EINVAL;
1129 
1130 	return hw->aq_fw_ops->set_eeprom(hw, eeprom->data, eeprom->length);
1131 }
1132 
1133 static int
1134 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1135 {
1136 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1137 	u32 mif_id;
1138 	int err;
1139 
1140 	if (regs->data == NULL) {
1141 		regs->length = hw_atl_utils_hw_get_reg_length();
1142 		regs->width = sizeof(u32);
1143 		return 0;
1144 	}
1145 
1146 	/* Only full register dump is supported */
1147 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1148 		return -ENOTSUP;
1149 
1150 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1151 
1152 	/* Device version */
1153 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1154 	regs->version = mif_id & 0xFFU;
1155 
1156 	return err;
1157 }
1158 
1159 static int
1160 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1161 {
1162 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1163 
1164 	if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1165 		fc_conf->mode = RTE_FC_NONE;
1166 	else if (hw->aq_nic_cfg->flow_control == (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1167 		fc_conf->mode = RTE_FC_FULL;
1168 	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1169 		fc_conf->mode = RTE_FC_RX_PAUSE;
1170 	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
1171 		fc_conf->mode = RTE_FC_TX_PAUSE;
1172 
1173 	return 0;
1174 }
1175 
1176 static int
1177 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1178 {
1179 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1180 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1181 
1182 
1183 	if (hw->aq_fw_ops->set_flow_control == NULL)
1184 		return -ENOTSUP;
1185 
1186 	if (fc_conf->mode == RTE_FC_NONE)
1187 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1188 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1189 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1190 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1191 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1192 	else if (fc_conf->mode == RTE_FC_FULL)
1193 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1194 
1195 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1196 		return hw->aq_fw_ops->set_flow_control(hw);
1197 
1198 	return 0;
1199 }
1200 
1201 static int
1202 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1203 		    u8 *mac_addr, bool enable)
1204 {
1205 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1206 	unsigned int h = 0U;
1207 	unsigned int l = 0U;
1208 	int err;
1209 
1210 	if (mac_addr) {
1211 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1212 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1213 			(mac_addr[4] << 8) | mac_addr[5];
1214 	}
1215 
1216 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1217 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1218 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1219 
1220 	if (enable)
1221 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1222 
1223 	err = aq_hw_err_from_flags(hw);
1224 
1225 	return err;
1226 }
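/*
 * The L2 unicast filters split a MAC address across two registers: bytes
 * 0-1 go into the MSW register and bytes 2-5 into the LSW register.  For
 * example, 00:11:22:33:44:55 is programmed as h = 0x0011 and
 * l = 0x22334455.  Passing mac_addr == NULL leaves both halves zero and
 * simply disables the filter at 'index'.
 */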
1227 
1228 static int
1229 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1230 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1231 {
1232 	if (is_zero_ether_addr(mac_addr)) {
1233 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1234 		return -EINVAL;
1235 	}
1236 
1237 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1238 }
1239 
1240 static void
1241 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1242 {
1243 	atl_update_mac_addr(dev, index, NULL, false);
1244 }
1245 
1246 static int
1247 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
1248 {
1249 	atl_remove_mac_addr(dev, 0);
1250 	atl_add_mac_addr(dev, addr, 0, 0);
1251 	return 0;
1252 }
1253 
1254 static int
1255 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1256 {
1257 	struct rte_eth_dev_info dev_info;
1258 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1259 
1260 	atl_dev_info_get(dev, &dev_info);
1261 
1262 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1263 		return -EINVAL;
1264 
1265 	/* update max frame size */
1266 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1267 
1268 	return 0;
1269 }
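/*
 * MTU check arithmetic: frame_size = mtu + ETHER_HDR_LEN (14) +
 * ETHER_CRC_LEN (4).  A standard 1500-byte MTU therefore needs a 1518-byte
 * frame and a 9000-byte jumbo MTU needs 9018 bytes, which must not exceed
 * the max_rx_pktlen (HW_ATL_B0_MTU_JUMBO) advertised by atl_dev_info_get().
 */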
1270 
1271 static int
1272 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1273 {
1274 	struct aq_hw_cfg_s *cfg =
1275 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1276 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1277 	int err = 0;
1278 	int i = 0;
1279 
1280 	PMD_INIT_FUNC_TRACE();
1281 
1282 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1283 		if (cfg->vlan_filter[i] == vlan_id) {
1284 			if (!on) {
1285 				/* Disable VLAN filter. */
1286 				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1287 
1288 				/* Clear VLAN filter entry */
1289 				cfg->vlan_filter[i] = 0;
1290 			}
1291 			break;
1292 		}
1293 	}
1294 
1295 	/* VLAN_ID was not found. So, nothing to delete. */
1296 	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1297 		goto exit;
1298 
1299 	/* VLAN_ID already exists, or was removed above. Nothing to do. */
1300 	if (i != HW_ATL_B0_MAX_VLAN_IDS)
1301 		goto exit;
1302 
1303 	/* Try to find a free VLAN filter entry to add the new VLAN_ID */
1304 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1305 		if (cfg->vlan_filter[i] == 0)
1306 			break;
1307 	}
1308 
1309 	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1310 		/* We have no free VLAN filter entry for the new VLAN_ID */
1311 		err = -ENOMEM;
1312 		goto exit;
1313 	}
1314 
1315 	cfg->vlan_filter[i] = vlan_id;
1316 	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1317 	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1318 	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1319 
1320 exit:
1321 	/* Enable VLAN promiscuous mode if the VLAN filter table is empty */
1322 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1323 		if (cfg->vlan_filter[i] != 0)
1324 			break;
1325 	}
1326 
1327 	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1328 
1329 	return err;
1330 }
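/*
 * VLAN filter bookkeeping above: an existing entry for vlan_id is either
 * disabled (on == 0) or left as-is; otherwise the first free slot of the
 * HW_ATL_B0_MAX_VLAN_IDS table is claimed for the new ID.  Whenever the
 * table ends up empty, the hardware is switched to VLAN promiscuous mode
 * so tagged traffic is not silently dropped.
 */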
1331 
1332 static int
1333 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1334 {
1335 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1336 	struct aq_hw_cfg_s *cfg =
1337 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1338 	int i;
1339 
1340 	PMD_INIT_FUNC_TRACE();
1341 
1342 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1343 		if (cfg->vlan_filter[i])
1344 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1345 	}
1346 	return 0;
1347 }
1348 
1349 static int
1350 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1351 {
1352 	struct aq_hw_cfg_s *cfg =
1353 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1354 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1355 	int ret = 0;
1356 	int i;
1357 
1358 	PMD_INIT_FUNC_TRACE();
1359 
1360 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1361 
1362 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1363 
1364 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1365 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1366 
1367 	if (mask & ETH_VLAN_EXTEND_MASK)
1368 		ret = -ENOTSUP;
1369 
1370 	return ret;
1371 }
1372 
1373 static int
1374 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1375 		  uint16_t tpid)
1376 {
1377 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1378 	int err = 0;
1379 
1380 	PMD_INIT_FUNC_TRACE();
1381 
1382 	switch (vlan_type) {
1383 	case ETH_VLAN_TYPE_INNER:
1384 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1385 		break;
1386 	case ETH_VLAN_TYPE_OUTER:
1387 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1388 		break;
1389 	default:
1390 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1391 		err = -ENOTSUP;
1392 	}
1393 
1394 	return err;
1395 }
1396 
1397 static void
1398 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1399 {
1400 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1401 
1402 	PMD_INIT_FUNC_TRACE();
1403 
1404 	if (queue_id >= dev->data->nb_rx_queues) {
1405 		PMD_DRV_LOG(ERR, "Invalid queue id");
1406 		return;
1407 	}
1408 
1409 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1410 }
1411 
1412 static int
1413 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1414 			  struct ether_addr *mc_addr_set,
1415 			  uint32_t nb_mc_addr)
1416 {
1417 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1418 	u32 i;
1419 
1420 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1421 		return -EINVAL;
1422 
1423 	/* Update whole uc filters table */
1424 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1425 		u8 *mac_addr = NULL;
1426 		u32 l = 0, h = 0;
1427 
1428 		if (i < nb_mc_addr) {
1429 			mac_addr = mc_addr_set[i].addr_bytes;
1430 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1431 				(mac_addr[4] << 8) | mac_addr[5];
1432 			h = (mac_addr[0] << 8) | mac_addr[1];
1433 		}
1434 
1435 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1436 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1437 							HW_ATL_B0_MAC_MIN + i);
1438 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1439 							HW_ATL_B0_MAC_MIN + i);
1440 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1441 					   HW_ATL_B0_MAC_MIN + i);
1442 	}
1443 
1444 	return 0;
1445 }
1446 
1447 static int
1448 atl_reta_update(struct rte_eth_dev *dev,
1449 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1450 		   uint16_t reta_size)
1451 {
1452 	int i;
1453 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1454 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1455 
1456 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1457 		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1458 					dev->data->nb_rx_queues - 1);
1459 
1460 	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1461 	return 0;
1462 }
1463 
1464 static int
1465 atl_reta_query(struct rte_eth_dev *dev,
1466 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1467 		    uint16_t reta_size)
1468 {
1469 	int i;
1470 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1471 
1472 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1473 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1474 	reta_conf->mask = ~0U;
1475 	return 0;
1476 }
1477 
1478 static int
1479 atl_rss_hash_update(struct rte_eth_dev *dev,
1480 				 struct rte_eth_rss_conf *rss_conf)
1481 {
1482 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1483 	struct aq_hw_cfg_s *cfg =
1484 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1485 	static u8 def_rss_key[40] = {
1486 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1487 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1488 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1489 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1490 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1491 	};
1492 
1493 	cfg->is_rss = !!rss_conf->rss_hf;
1494 	if (rss_conf->rss_key) {
1495 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1496 		       rss_conf->rss_key_len);
1497 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1498 	} else {
1499 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1500 		       sizeof(def_rss_key));
1501 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1502 	}
1503 
1504 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1505 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1506 	return 0;
1507 }
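/*
 * When the caller supplies no RSS key, the driver falls back to the static
 * 40-byte def_rss_key above.  Caller-side sketch (illustrative only):
 *
 *	struct rte_eth_rss_conf rss = {
 *		.rss_key = NULL,	// NULL selects the driver default key
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
 *	};
 *	rte_eth_dev_rss_hash_update(port_id, &rss);
 */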
1508 
1509 static int
1510 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1511 				 struct rte_eth_rss_conf *rss_conf)
1512 {
1513 	struct aq_hw_cfg_s *cfg =
1514 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1515 
1516 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1517 	if (rss_conf->rss_key) {
1518 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1519 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1520 		       rss_conf->rss_key_len);
1521 	}
1522 
1523 	return 0;
1524 }
1525 
1526 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1527 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1528 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1529 
1530 RTE_INIT(atl_init_log)
1531 {
1532 	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1533 	if (atl_logtype_init >= 0)
1534 		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1535 	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1536 	if (atl_logtype_driver >= 0)
1537 		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
1538 }
1539 
1540