xref: /dpdk/drivers/net/atlantic/atl_ethdev.c (revision 51a071cd8e97d2e7e6ddb7b160aa460292ba608b)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_ethdev_pci.h>
6 
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
10 #include "atl_logs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
14 
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
17 
18 static int  atl_dev_configure(struct rte_eth_dev *dev);
19 static int  atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int  atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
30 
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 				    struct rte_eth_xstat_name *xstats_names,
33 				    unsigned int size);
34 
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 				struct rte_eth_stats *stats);
37 
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 			      struct rte_eth_xstat *stats, unsigned int n);
40 
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
42 
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
44 			      size_t fw_size);
45 
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 			       struct rte_eth_dev_info *dev_info);
48 
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
50 
51 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
52 
53 /* VLAN stuff */
54 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
55 		uint16_t vlan_id, int on);
56 
57 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
58 
59 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
60 				     uint16_t queue_id, int on);
61 
62 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
63 			     enum rte_vlan_type vlan_type, uint16_t tpid);
64 
65 /* EEPROM */
66 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
67 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
68 			      struct rte_dev_eeprom_info *eeprom);
69 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
70 			      struct rte_dev_eeprom_info *eeprom);
71 
72 /* Regs */
73 static int atl_dev_get_regs(struct rte_eth_dev *dev,
74 			    struct rte_dev_reg_info *regs);
75 
76 /* Flow control */
77 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
78 			       struct rte_eth_fc_conf *fc_conf);
79 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
80 			       struct rte_eth_fc_conf *fc_conf);
81 
82 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
83 
84 /* Interrupts */
85 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
86 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
87 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
88 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
89 				    struct rte_intr_handle *handle);
90 static void atl_dev_interrupt_handler(void *param);
91 
92 
93 static int atl_add_mac_addr(struct rte_eth_dev *dev,
94 			    struct ether_addr *mac_addr,
95 			    uint32_t index, uint32_t pool);
96 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
97 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
98 					   struct ether_addr *mac_addr);
99 
100 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
101 				    struct ether_addr *mc_addr_set,
102 				    uint32_t nb_mc_addr);
103 
104 /* RSS */
105 static int atl_reta_update(struct rte_eth_dev *dev,
106 			     struct rte_eth_rss_reta_entry64 *reta_conf,
107 			     uint16_t reta_size);
108 static int atl_reta_query(struct rte_eth_dev *dev,
109 			    struct rte_eth_rss_reta_entry64 *reta_conf,
110 			    uint16_t reta_size);
111 static int atl_rss_hash_update(struct rte_eth_dev *dev,
112 				 struct rte_eth_rss_conf *rss_conf);
113 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
114 				   struct rte_eth_rss_conf *rss_conf);
115 
116 
117 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
118 	struct rte_pci_device *pci_dev);
119 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
120 
121 static void atl_dev_info_get(struct rte_eth_dev *dev,
122 				struct rte_eth_dev_info *dev_info);
123 
/* rte_log type ids used by this PMD's log macros; their registration
 * is not visible in this chunk of the file.
 */
int atl_logtype_init;
int atl_logtype_driver;
126 
/*
 * The set of PCI devices this driver supports.
 * Entries are grouped by device-id family; the zeroed vendor_id
 * entry terminates the list.
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};
155 
/* PCI driver glue: ties the id table above to the probe/remove callbacks.
 * Flags request BAR mapping, link-state-change interrupt support and
 * IOVA-as-VA mode.
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
163 
/* RX offload capabilities advertised through atl_dev_info_get(). */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME)

/* TX offload capabilities advertised through atl_dev_info_get(). */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MULTI_SEGS)
176 
/* Descriptor ring size limits reported via dev_infos_get. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

/* Same for TX, plus per-packet segment limits used by tx_pkt_prepare. */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
190 
/* Builds a { "name", offset } pair for a field of struct aq_stats_s. */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name) \
}

/* Maps an xstat name to its byte offset inside struct aq_stats_s. */
struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
};

/* Extended statistics exposed via xstats_get()/xstats_get_names(). */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
};
217 
/* ethdev callback table installed on every port in eth_atl_dev_init().
 * Note that stats_reset and xstats_reset intentionally share one handler.
 */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
297 
/* Thin wrapper around the B0-specific full hardware reset. */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
303 
304 static inline void
305 atl_enable_intr(struct rte_eth_dev *dev)
306 {
307 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
308 
309 	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
310 }
311 
/* Mask every interrupt cause by writing all-ones to the low
 * status-word mask-clear register.
 */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
318 
319 static int
320 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
321 {
322 	struct atl_adapter *adapter =
323 		(struct atl_adapter *)eth_dev->data->dev_private;
324 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
325 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
326 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
327 	int err = 0;
328 
329 	PMD_INIT_FUNC_TRACE();
330 
331 	eth_dev->dev_ops = &atl_eth_dev_ops;
332 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
333 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
334 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
335 
336 	/* For secondary processes, the primary process has done all the work */
337 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
338 		return 0;
339 
340 	/* Vendor and Device ID need to be set before init of shared code */
341 	hw->device_id = pci_dev->id.device_id;
342 	hw->vendor_id = pci_dev->id.vendor_id;
343 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
344 
345 	/* Hardware configuration - hardcode */
346 	adapter->hw_cfg.is_lro = false;
347 	adapter->hw_cfg.wol = false;
348 	adapter->hw_cfg.is_rss = false;
349 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
350 
351 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
352 			  AQ_NIC_RATE_5G |
353 			  AQ_NIC_RATE_2G5 |
354 			  AQ_NIC_RATE_1G |
355 			  AQ_NIC_RATE_100M;
356 
357 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
358 	adapter->hw_cfg.aq_rss.indirection_table_size =
359 		HW_ATL_B0_RSS_REDIRECTION_MAX;
360 
361 	hw->aq_nic_cfg = &adapter->hw_cfg;
362 
363 	/* disable interrupt */
364 	atl_disable_intr(hw);
365 
366 	/* Allocate memory for storing MAC addresses */
367 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
368 	if (eth_dev->data->mac_addrs == NULL) {
369 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
370 		return -ENOMEM;
371 	}
372 
373 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
374 	if (err)
375 		return err;
376 
377 	/* Copy the permanent MAC address */
378 	if (hw->aq_fw_ops->get_mac_permanent(hw,
379 			eth_dev->data->mac_addrs->addr_bytes) != 0)
380 		return -EINVAL;
381 
382 	/* Reset the hw statistics */
383 	atl_dev_stats_reset(eth_dev);
384 
385 	rte_intr_callback_register(intr_handle,
386 				   atl_dev_interrupt_handler, eth_dev);
387 
388 	/* enable uio/vfio intr/eventfd mapping */
389 	rte_intr_enable(intr_handle);
390 
391 	/* enable support intr */
392 	atl_enable_intr(eth_dev);
393 
394 	return err;
395 }
396 
397 static int
398 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
399 {
400 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
401 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
402 	struct aq_hw_s *hw;
403 
404 	PMD_INIT_FUNC_TRACE();
405 
406 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
407 		return -EPERM;
408 
409 	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
410 
411 	if (hw->adapter_stopped == 0)
412 		atl_dev_close(eth_dev);
413 
414 	eth_dev->dev_ops = NULL;
415 	eth_dev->rx_pkt_burst = NULL;
416 	eth_dev->tx_pkt_burst = NULL;
417 
418 	/* disable uio intr before callback unregister */
419 	rte_intr_disable(intr_handle);
420 	rte_intr_callback_unregister(intr_handle,
421 				     atl_dev_interrupt_handler, eth_dev);
422 
423 	rte_free(eth_dev->data->mac_addrs);
424 	eth_dev->data->mac_addrs = NULL;
425 
426 	return 0;
427 }
428 
/* PCI probe callback: allocate an ethdev with an atl_adapter private
 * area and run eth_atl_dev_init() on it.
 */
static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}
436 
/* PCI remove callback: run eth_atl_dev_uninit() and release the ethdev. */
static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}
442 
/* dev_configure callback: nothing to program yet; just record that the
 * link status must be refreshed once the port starts.
 */
static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}
456 
457 /*
458  * Configure device link speed and setup link.
459  * It returns 0 on success.
460  */
461 static int
462 atl_dev_start(struct rte_eth_dev *dev)
463 {
464 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
465 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
466 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
467 	uint32_t intr_vector = 0;
468 	int status;
469 	int err;
470 
471 	PMD_INIT_FUNC_TRACE();
472 
473 	/* set adapter started */
474 	hw->adapter_stopped = 0;
475 
476 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
477 		PMD_INIT_LOG(ERR,
478 		"Invalid link_speeds for port %u, fix speed not supported",
479 				dev->data->port_id);
480 		return -EINVAL;
481 	}
482 
483 	/* disable uio/vfio intr/eventfd mapping */
484 	rte_intr_disable(intr_handle);
485 
486 	/* reinitialize adapter
487 	 * this calls reset and start
488 	 */
489 	status = atl_reset_hw(hw);
490 	if (status != 0)
491 		return -EIO;
492 
493 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
494 
495 	hw_atl_b0_hw_start(hw);
496 	/* check and configure queue intr-vector mapping */
497 	if ((rte_intr_cap_multiple(intr_handle) ||
498 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
499 	    dev->data->dev_conf.intr_conf.rxq != 0) {
500 		intr_vector = dev->data->nb_rx_queues;
501 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
502 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
503 					ATL_MAX_INTR_QUEUE_NUM);
504 			return -ENOTSUP;
505 		}
506 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
507 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
508 			return -1;
509 		}
510 	}
511 
512 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
513 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
514 				    dev->data->nb_rx_queues * sizeof(int), 0);
515 		if (intr_handle->intr_vec == NULL) {
516 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
517 				     " intr_vec", dev->data->nb_rx_queues);
518 			return -ENOMEM;
519 		}
520 	}
521 
522 	/* initialize transmission unit */
523 	atl_tx_init(dev);
524 
525 	/* This can fail when allocating mbufs for descriptor rings */
526 	err = atl_rx_init(dev);
527 	if (err) {
528 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
529 		goto error;
530 	}
531 
532 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
533 		hw->fw_ver_actual >> 24,
534 		(hw->fw_ver_actual >> 16) & 0xFF,
535 		hw->fw_ver_actual & 0xFFFF);
536 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
537 
538 	err = atl_start_queues(dev);
539 	if (err < 0) {
540 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
541 		goto error;
542 	}
543 
544 	err = atl_dev_set_link_up(dev);
545 
546 	err = hw->aq_fw_ops->update_link_status(hw);
547 
548 	if (err)
549 		goto error;
550 
551 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
552 
553 	if (err)
554 		goto error;
555 
556 	if (rte_intr_allow_others(intr_handle)) {
557 		/* check if lsc interrupt is enabled */
558 		if (dev->data->dev_conf.intr_conf.lsc != 0)
559 			atl_dev_lsc_interrupt_setup(dev, true);
560 		else
561 			atl_dev_lsc_interrupt_setup(dev, false);
562 	} else {
563 		rte_intr_callback_unregister(intr_handle,
564 					     atl_dev_interrupt_handler, dev);
565 		if (dev->data->dev_conf.intr_conf.lsc != 0)
566 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
567 				     " no intr multiplex");
568 	}
569 
570 	/* check if rxq interrupt is enabled */
571 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
572 	    rte_intr_dp_is_en(intr_handle))
573 		atl_dev_rxq_interrupt_setup(dev);
574 
575 	/* enable uio/vfio intr/eventfd mapping */
576 	rte_intr_enable(intr_handle);
577 
578 	/* resume enabled intr since hw reset */
579 	atl_enable_intr(dev);
580 
581 	return 0;
582 
583 error:
584 	atl_stop_queues(dev);
585 	return -EIO;
586 }
587 
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	/* NOTE(review): atl_reset_hw() return value is ignored here. */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
632 
633 /*
634  * Set device link up: enable tx.
635  */
636 static int
637 atl_dev_set_link_up(struct rte_eth_dev *dev)
638 {
639 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
640 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
641 	uint32_t speed_mask = 0;
642 
643 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
644 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
645 	} else {
646 		if (link_speeds & ETH_LINK_SPEED_10G)
647 			speed_mask |= AQ_NIC_RATE_10G;
648 		if (link_speeds & ETH_LINK_SPEED_5G)
649 			speed_mask |= AQ_NIC_RATE_5G;
650 		if (link_speeds & ETH_LINK_SPEED_1G)
651 			speed_mask |= AQ_NIC_RATE_1G;
652 		if (link_speeds & ETH_LINK_SPEED_2_5G)
653 			speed_mask |=  AQ_NIC_RATE_2G5;
654 		if (link_speeds & ETH_LINK_SPEED_100M)
655 			speed_mask |= AQ_NIC_RATE_100M;
656 	}
657 
658 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
659 }
660 
/*
 * Set device link down: request a zero speed mask so the firmware
 * drops the link.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}
671 
/*
 * Close device: stop the adapter, then release all queue resources.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}
684 
/* dev_reset callback: full uninit followed by a fresh init of the port. */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret = eth_atl_dev_uninit(dev);

	if (ret != 0)
		return ret;

	return eth_atl_dev_init(dev);
}
698 
699 
700 static int
701 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
702 {
703 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
704 	struct aq_hw_s *hw = &adapter->hw;
705 	struct atl_sw_stats *swstats = &adapter->sw_stats;
706 	unsigned int i;
707 
708 	hw->aq_fw_ops->update_stats(hw);
709 
710 	/* Fill out the rte_eth_stats statistics structure */
711 	stats->ipackets = hw->curr_stats.dma_pkt_rc;
712 	stats->ibytes = hw->curr_stats.dma_oct_rc;
713 	stats->imissed = hw->curr_stats.dpc;
714 	stats->ierrors = hw->curr_stats.erpt;
715 
716 	stats->opackets = hw->curr_stats.dma_pkt_tc;
717 	stats->obytes = hw->curr_stats.dma_oct_tc;
718 	stats->oerrors = 0;
719 
720 	stats->rx_nombuf = swstats->rx_nombuf;
721 
722 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
723 		stats->q_ipackets[i] = swstats->q_ipackets[i];
724 		stats->q_opackets[i] = swstats->q_opackets[i];
725 		stats->q_ibytes[i] = swstats->q_ibytes[i];
726 		stats->q_obytes[i] = swstats->q_obytes[i];
727 		stats->q_errors[i] = swstats->q_errors[i];
728 	}
729 	return 0;
730 }
731 
/* Reset statistics: zero both the cached firmware counters and the
 * software-maintained counters. Shared by stats_reset and xstats_reset.
 */
static void
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	/* Refresh counters before zeroing — presumably to flush pending
	 * firmware deltas; TODO confirm against aq_fw_ops semantics.
	 */
	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
}
745 
746 static int
747 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
748 			 struct rte_eth_xstat_name *xstats_names,
749 			 unsigned int size)
750 {
751 	unsigned int i;
752 
753 	if (!xstats_names)
754 		return RTE_DIM(atl_xstats_tbl);
755 
756 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
757 		snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
758 			atl_xstats_tbl[i].name);
759 
760 	return size;
761 }
762 
763 static int
764 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
765 		   unsigned int n)
766 {
767 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
768 	struct aq_hw_s *hw = &adapter->hw;
769 	unsigned int i;
770 
771 	if (!stats)
772 		return 0;
773 
774 	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
775 		stats[i].id = i;
776 		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
777 					atl_xstats_tbl[i].offset);
778 	}
779 
780 	return n;
781 }
782 
783 static int
784 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
785 {
786 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
787 	uint32_t fw_ver = 0;
788 	unsigned int ret = 0;
789 
790 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
791 	if (ret)
792 		return -EIO;
793 
794 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
795 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
796 
797 	ret += 1; /* add string null-terminator */
798 
799 	if (fw_size < ret)
800 		return ret;
801 
802 	return 0;
803 }
804 
805 static void
806 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
807 {
808 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
809 
810 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
811 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
812 
813 	dev_info->min_rx_bufsize = 1024;
814 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
815 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
816 	dev_info->max_vfs = pci_dev->max_vfs;
817 
818 	dev_info->max_hash_mac_addrs = 0;
819 	dev_info->max_vmdq_pools = 0;
820 	dev_info->vmdq_queue_num = 0;
821 
822 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
823 
824 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
825 
826 
827 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
828 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
829 	};
830 
831 	dev_info->default_txconf = (struct rte_eth_txconf) {
832 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
833 	};
834 
835 	dev_info->rx_desc_lim = rx_desc_lim;
836 	dev_info->tx_desc_lim = tx_desc_lim;
837 
838 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
839 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
840 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
841 
842 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
843 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
844 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
845 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
846 }
847 
848 static const uint32_t *
849 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
850 {
851 	static const uint32_t ptypes[] = {
852 		RTE_PTYPE_L2_ETHER,
853 		RTE_PTYPE_L2_ETHER_ARP,
854 		RTE_PTYPE_L2_ETHER_VLAN,
855 		RTE_PTYPE_L3_IPV4,
856 		RTE_PTYPE_L3_IPV6,
857 		RTE_PTYPE_L4_TCP,
858 		RTE_PTYPE_L4_UDP,
859 		RTE_PTYPE_L4_SCTP,
860 		RTE_PTYPE_L4_ICMP,
861 		RTE_PTYPE_UNKNOWN
862 	};
863 
864 	if (dev->rx_pkt_burst == atl_recv_pkts)
865 		return ptypes;
866 
867 	return NULL;
868 }
869 
/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link, old;
	int err = 0;

	/* Build a "link down" template; speed is filled in below if up. */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	/* NOTE(review): on firmware error this returns 0 ("changed")
	 * without touching the recorded link status.
	 */
	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	/* Link is up; no further link configuration is pending. */
	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
916 
917 static void
918 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
919 {
920 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
921 
922 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
923 }
924 
925 static void
926 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
927 {
928 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
929 
930 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
931 }
932 
933 static void
934 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
935 {
936 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
937 
938 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
939 }
940 
941 static void
942 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
943 {
944 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
945 
946 	if (dev->data->promiscuous == 1)
947 		return; /* must remain in all_multicast mode */
948 
949 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
950 }
951 
/**
 * Clears the interrupt causes and enables the interrupt.
 * It is called only once, during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or disable (currently unused by the implementation).
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
965 
static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	/* No hardware programming is done here; only log current status. */
	atl_dev_link_status_print(dev);
	return 0;
}
972 
/* RX-queue interrupt setup: no per-queue programming is required here. */
static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
978 
979 
/*
 * Read the pending interrupt cause bits, mask further interrupts, and
 * record whether a link event needs handling in intr->flags.
 */
static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	/* Mask interrupts until atl_dev_interrupt_action() re-enables them.
	 * Note that any previously recorded flags are overwritten below.
	 */
	atl_disable_intr(hw);
	intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
			ATL_FLAG_NEED_LINK_UPDATE : 0;

	return 0;
}
996 
/**
 * Reads the recorded link status and prints it to the driver log.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	/* Read the status recorded by atl_dev_link_update(). */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}


	/* Extra diagnostics only in DEBUG builds: the PCI address. */
#ifdef DEBUG
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
#endif

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
}
1040 
1041 /*
1042  * It executes link_update after knowing an interrupt occurred.
1043  *
1044  * @param dev
1045  *  Pointer to struct rte_eth_dev.
1046  *
1047  * @return
1048  *  - On success, zero.
1049  *  - On failure, a negative value.
1050  */
1051 static int
1052 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1053 			   struct rte_intr_handle *intr_handle)
1054 {
1055 	struct atl_interrupt *intr =
1056 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1057 
1058 	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1059 		atl_dev_link_update(dev, 0);
1060 		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1061 		atl_dev_link_status_print(dev);
1062 		_rte_eth_dev_callback_process(dev,
1063 			RTE_ETH_EVENT_INTR_LSC, NULL);
1064 	}
1065 
1066 	atl_enable_intr(dev);
1067 	rte_intr_enable(intr_handle);
1068 
1069 	return 0;
1070 }
1071 
1072 /**
1073  * Interrupt handler triggered by NIC  for handling
1074  * specific interrupt.
1075  *
1076  * @param handle
1077  *  Pointer to interrupt handle.
1078  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
1080  *
1081  * @return
1082  *  void
1083  */
static void
atl_dev_interrupt_handler(void *param)
{
	/* param is the rte_eth_dev registered with this interrupt callback. */
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	/* Latch pending causes, then act on them and re-enable interrupts. */
	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}
1092 
/* Size in bytes of the SFP module EEPROM exposed via get/set_eeprom. */
#define SFP_EEPROM_SIZE 0xff

/*
 * Report the EEPROM size to the ethdev layer.
 * Returns the fixed SFP EEPROM size; no hardware access is required.
 */
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}
1100 
1101 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1102 		       struct rte_dev_eeprom_info *eeprom)
1103 {
1104 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1105 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1106 
1107 	if (hw->aq_fw_ops->get_eeprom == NULL)
1108 		return -ENOTSUP;
1109 
1110 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1111 	    eeprom->data == NULL)
1112 		return -EINVAL;
1113 
1114 	if (eeprom->magic)
1115 		dev_addr = eeprom->magic;
1116 
1117 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1118 					 eeprom->length, eeprom->offset);
1119 }
1120 
1121 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1122 		       struct rte_dev_eeprom_info *eeprom)
1123 {
1124 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1125 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1126 
1127 	if (hw->aq_fw_ops->set_eeprom == NULL)
1128 		return -ENOTSUP;
1129 
1130 	if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1131 		return -EINVAL;
1132 
1133 	if (eeprom->magic)
1134 		dev_addr = eeprom->magic;
1135 
1136 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr,
1137 					 eeprom->data, eeprom->length);
1138 }
1139 
1140 static int
1141 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1142 {
1143 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1144 	u32 mif_id;
1145 	int err;
1146 
1147 	if (regs->data == NULL) {
1148 		regs->length = hw_atl_utils_hw_get_reg_length();
1149 		regs->width = sizeof(u32);
1150 		return 0;
1151 	}
1152 
1153 	/* Only full register dump is supported */
1154 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1155 		return -ENOTSUP;
1156 
1157 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1158 
1159 	/* Device version */
1160 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1161 	regs->version = mif_id & 0xFFU;
1162 
1163 	return err;
1164 }
1165 
1166 static int
1167 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1168 {
1169 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1170 
1171 	if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1172 		fc_conf->mode = RTE_FC_NONE;
1173 	else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1174 		fc_conf->mode = RTE_FC_FULL;
1175 	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1176 		fc_conf->mode = RTE_FC_RX_PAUSE;
1177 	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1178 		fc_conf->mode = RTE_FC_TX_PAUSE;
1179 
1180 	return 0;
1181 }
1182 
1183 static int
1184 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1185 {
1186 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1187 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1188 
1189 
1190 	if (hw->aq_fw_ops->set_flow_control == NULL)
1191 		return -ENOTSUP;
1192 
1193 	if (fc_conf->mode == RTE_FC_NONE)
1194 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1195 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1196 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1197 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1198 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1199 	else if (fc_conf->mode == RTE_FC_FULL)
1200 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1201 
1202 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1203 		return hw->aq_fw_ops->set_flow_control(hw);
1204 
1205 	return 0;
1206 }
1207 
1208 static int
1209 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1210 		    u8 *mac_addr, bool enable)
1211 {
1212 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1213 	unsigned int h = 0U;
1214 	unsigned int l = 0U;
1215 	int err;
1216 
1217 	if (mac_addr) {
1218 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1219 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1220 			(mac_addr[4] << 8) | mac_addr[5];
1221 	}
1222 
1223 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1224 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1225 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1226 
1227 	if (enable)
1228 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1229 
1230 	err = aq_hw_err_from_flags(hw);
1231 
1232 	return err;
1233 }
1234 
1235 static int
1236 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1237 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1238 {
1239 	if (is_zero_ether_addr(mac_addr)) {
1240 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1241 		return -EINVAL;
1242 	}
1243 
1244 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1245 }
1246 
/* Disable the unicast MAC filter at @index and clear its address. */
static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}
1252 
/*
 * Replace the default (slot 0) MAC address.
 * Propagates the result of the add operation; the original discarded it
 * and unconditionally reported success.
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1260 
1261 static int
1262 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1263 {
1264 	struct rte_eth_dev_info dev_info;
1265 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1266 
1267 	atl_dev_info_get(dev, &dev_info);
1268 
1269 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1270 		return -EINVAL;
1271 
1272 	/* update max frame size */
1273 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1274 
1275 	return 0;
1276 }
1277 
/*
 * Add or remove @vlan_id from the hardware VLAN filter table.
 *
 * @param vlan_id  VLAN ID to add (@on != 0) or remove (@on == 0).
 * @return 0 on success, -ENOMEM when no free filter slot remains.
 *
 * NOTE(review): a shadow-table value of 0 doubles as "slot free", so
 * VLAN ID 0 cannot be filtered by this scheme — confirm that is intended.
 */
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Look up vlan_id in the shadow table; on removal, disable the
	 * hardware filter and free the slot.
	 */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exist, or already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to found free VLAN filter to add new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID*/
		err = -ENOMEM;
		goto exit;
	}

	/* Program the free slot and enable it. */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter empty  */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
1338 
1339 static int
1340 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1341 {
1342 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1343 	struct aq_hw_cfg_s *cfg =
1344 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1345 	int i;
1346 
1347 	PMD_INIT_FUNC_TRACE();
1348 
1349 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1350 		if (cfg->vlan_filter[i])
1351 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1352 	}
1353 	return 0;
1354 }
1355 
1356 static int
1357 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1358 {
1359 	struct aq_hw_cfg_s *cfg =
1360 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1361 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1362 	int ret = 0;
1363 	int i;
1364 
1365 	PMD_INIT_FUNC_TRACE();
1366 
1367 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1368 
1369 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1370 
1371 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1372 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1373 
1374 	if (mask & ETH_VLAN_EXTEND_MASK)
1375 		ret = -ENOTSUP;
1376 
1377 	return ret;
1378 }
1379 
1380 static int
1381 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1382 		  uint16_t tpid)
1383 {
1384 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1385 	int err = 0;
1386 
1387 	PMD_INIT_FUNC_TRACE();
1388 
1389 	switch (vlan_type) {
1390 	case ETH_VLAN_TYPE_INNER:
1391 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1392 		break;
1393 	case ETH_VLAN_TYPE_OUTER:
1394 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1395 		break;
1396 	default:
1397 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1398 		err = -ENOTSUP;
1399 	}
1400 
1401 	return err;
1402 }
1403 
1404 static void
1405 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1406 {
1407 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1408 
1409 	PMD_INIT_FUNC_TRACE();
1410 
1411 	if (queue_id > dev->data->nb_rx_queues) {
1412 		PMD_DRV_LOG(ERR, "Invalid queue id");
1413 		return;
1414 	}
1415 
1416 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1417 }
1418 
1419 static int
1420 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1421 			  struct ether_addr *mc_addr_set,
1422 			  uint32_t nb_mc_addr)
1423 {
1424 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1425 	u32 i;
1426 
1427 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1428 		return -EINVAL;
1429 
1430 	/* Update whole uc filters table */
1431 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1432 		u8 *mac_addr = NULL;
1433 		u32 l = 0, h = 0;
1434 
1435 		if (i < nb_mc_addr) {
1436 			mac_addr = mc_addr_set[i].addr_bytes;
1437 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1438 				(mac_addr[4] << 8) | mac_addr[5];
1439 			h = (mac_addr[0] << 8) | mac_addr[1];
1440 		}
1441 
1442 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1443 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1444 							HW_ATL_B0_MAC_MIN + i);
1445 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1446 							HW_ATL_B0_MAC_MIN + i);
1447 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1448 					   HW_ATL_B0_MAC_MIN + i);
1449 	}
1450 
1451 	return 0;
1452 }
1453 
/*
 * Update the RSS redirection table from @reta_conf and push it to hardware.
 *
 * NOTE(review): reta_conf->mask is ignored (all entries are overwritten)
 * and reta[] is indexed directly up to the device table size, so entries
 * beyond the first 64-entry group are never read — confirm this is
 * acceptable for this device's table size.
 */
static int
atl_reta_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	/* Clamp each entry to a valid Rx queue index. */
	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}
1470 
/*
 * Copy the cached RSS redirection table into @reta_conf.
 *
 * NOTE(review): only the first group's mask is set to ~0; for
 * reta_size > 64 the remaining groups' masks are left untouched —
 * confirm callers only use a single group here.
 */
static int
atl_reta_query(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}
1484 
1485 static int
1486 atl_rss_hash_update(struct rte_eth_dev *dev,
1487 				 struct rte_eth_rss_conf *rss_conf)
1488 {
1489 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1490 	struct aq_hw_cfg_s *cfg =
1491 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1492 	static u8 def_rss_key[40] = {
1493 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1494 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1495 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1496 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1497 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1498 	};
1499 
1500 	cfg->is_rss = !!rss_conf->rss_hf;
1501 	if (rss_conf->rss_key) {
1502 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1503 		       rss_conf->rss_key_len);
1504 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1505 	} else {
1506 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1507 		       sizeof(def_rss_key));
1508 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1509 	}
1510 
1511 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1512 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1513 	return 0;
1514 }
1515 
1516 static int
1517 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1518 				 struct rte_eth_rss_conf *rss_conf)
1519 {
1520 	struct aq_hw_cfg_s *cfg =
1521 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1522 
1523 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1524 	if (rss_conf->rss_key) {
1525 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1526 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1527 		       rss_conf->rss_key_len);
1528 	}
1529 
1530 	return 0;
1531 }
1532 
/* Register the PMD with EAL: driver entry, PCI ID table, kernel deps. */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");

/* Constructor: register init/driver log types, defaulting to NOTICE. */
RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}
1546 
1547