1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_alarm.h>
8 
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
12 #include "atl_logs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
16 
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
19 
20 static int  atl_dev_configure(struct rte_eth_dev *dev);
21 static int  atl_dev_start(struct rte_eth_dev *dev);
22 static void atl_dev_stop(struct rte_eth_dev *dev);
23 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
24 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
25 static void atl_dev_close(struct rte_eth_dev *dev);
26 static int  atl_dev_reset(struct rte_eth_dev *dev);
27 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
28 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
29 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
30 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
31 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
32 
33 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
34 				    struct rte_eth_xstat_name *xstats_names,
35 				    unsigned int size);
36 
37 static int atl_dev_stats_get(struct rte_eth_dev *dev,
38 				struct rte_eth_stats *stats);
39 
40 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
41 			      struct rte_eth_xstat *stats, unsigned int n);
42 
43 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
44 
45 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 			      size_t fw_size);
47 
48 static void atl_dev_info_get(struct rte_eth_dev *dev,
49 			       struct rte_eth_dev_info *dev_info);
50 
51 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
52 
53 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
54 
55 /* VLAN stuff */
56 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
57 		uint16_t vlan_id, int on);
58 
59 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
60 
61 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
62 				     uint16_t queue_id, int on);
63 
64 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
65 			     enum rte_vlan_type vlan_type, uint16_t tpid);
66 
67 /* EEPROM */
68 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
69 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
70 			      struct rte_dev_eeprom_info *eeprom);
71 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
72 			      struct rte_dev_eeprom_info *eeprom);
73 
74 /* Regs */
75 static int atl_dev_get_regs(struct rte_eth_dev *dev,
76 			    struct rte_dev_reg_info *regs);
77 
78 /* Flow control */
79 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
80 			       struct rte_eth_fc_conf *fc_conf);
81 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
82 			       struct rte_eth_fc_conf *fc_conf);
83 
84 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
85 
86 /* Interrupts */
87 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
88 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
89 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
90 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
91 				    struct rte_intr_handle *handle);
92 static void atl_dev_interrupt_handler(void *param);
93 
94 
95 static int atl_add_mac_addr(struct rte_eth_dev *dev,
96 			    struct ether_addr *mac_addr,
97 			    uint32_t index, uint32_t pool);
98 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
99 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
100 					   struct ether_addr *mac_addr);
101 
102 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
103 				    struct ether_addr *mc_addr_set,
104 				    uint32_t nb_mc_addr);
105 
106 /* RSS */
107 static int atl_reta_update(struct rte_eth_dev *dev,
108 			     struct rte_eth_rss_reta_entry64 *reta_conf,
109 			     uint16_t reta_size);
110 static int atl_reta_query(struct rte_eth_dev *dev,
111 			    struct rte_eth_rss_reta_entry64 *reta_conf,
112 			    uint16_t reta_size);
113 static int atl_rss_hash_update(struct rte_eth_dev *dev,
114 				 struct rte_eth_rss_conf *rss_conf);
115 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
116 				   struct rte_eth_rss_conf *rss_conf);
117 
118 
119 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
120 	struct rte_pci_device *pci_dev);
121 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
122 
126 int atl_logtype_init;
127 int atl_logtype_driver;
128 
129 /*
130  * The set of PCI devices this driver supports
131  */
132 static const struct rte_pci_id pci_id_atl_map[] = {
133 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
134 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
135 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
136 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
137 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
138 
139 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
140 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
141 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
142 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
143 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
144 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
145 
146 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
147 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
148 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
149 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
150 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
151 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
152 
153 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
154 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
155 	{ .vendor_id = 0, /* sentinel */ },
156 };
157 
158 static struct rte_pci_driver rte_atl_pmd = {
159 	.id_table = pci_id_atl_map,
160 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
161 		     RTE_PCI_DRV_IOVA_AS_VA,
162 	.probe = eth_atl_pci_probe,
163 	.remove = eth_atl_pci_remove,
164 };
165 
166 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
167 			| DEV_RX_OFFLOAD_IPV4_CKSUM \
168 			| DEV_RX_OFFLOAD_UDP_CKSUM \
169 			| DEV_RX_OFFLOAD_TCP_CKSUM \
170 			| DEV_RX_OFFLOAD_JUMBO_FRAME \
171 			| DEV_RX_OFFLOAD_MACSEC_STRIP \
172 			| DEV_RX_OFFLOAD_VLAN_FILTER)
173 
174 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
175 			| DEV_TX_OFFLOAD_IPV4_CKSUM \
176 			| DEV_TX_OFFLOAD_UDP_CKSUM \
177 			| DEV_TX_OFFLOAD_TCP_CKSUM \
178 			| DEV_TX_OFFLOAD_TCP_TSO \
179 			| DEV_TX_OFFLOAD_MACSEC_INSERT \
180 			| DEV_TX_OFFLOAD_MULTI_SEGS)
181 
182 static const struct rte_eth_desc_lim rx_desc_lim = {
183 	.nb_max = ATL_MAX_RING_DESC,
184 	.nb_min = ATL_MIN_RING_DESC,
185 	.nb_align = ATL_RXD_ALIGN,
186 };
187 
188 static const struct rte_eth_desc_lim tx_desc_lim = {
189 	.nb_max = ATL_MAX_RING_DESC,
190 	.nb_min = ATL_MIN_RING_DESC,
191 	.nb_align = ATL_TXD_ALIGN,
192 	.nb_seg_max = ATL_TX_MAX_SEG,
193 	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
194 };
195 
196 #define ATL_XSTATS_FIELD(name) { \
197 	#name, \
198 	offsetof(struct aq_stats_s, name) \
199 }
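
/*
 * For example, ATL_XSTATS_FIELD(uprc) expands to
 * { "uprc", offsetof(struct aq_stats_s, uprc) }: each table entry pairs
 * a counter name with its byte offset inside struct aq_stats_s, which
 * atl_dev_xstats_get() below uses to read the counter generically.
 */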
200 
201 struct atl_xstats_tbl_s {
202 	const char *name;
203 	unsigned int offset;
204 };
205 
206 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
207 	ATL_XSTATS_FIELD(uprc),
208 	ATL_XSTATS_FIELD(mprc),
209 	ATL_XSTATS_FIELD(bprc),
210 	ATL_XSTATS_FIELD(erpt),
211 	ATL_XSTATS_FIELD(uptc),
212 	ATL_XSTATS_FIELD(mptc),
213 	ATL_XSTATS_FIELD(bptc),
214 	ATL_XSTATS_FIELD(erpr),
215 	ATL_XSTATS_FIELD(ubrc),
216 	ATL_XSTATS_FIELD(ubtc),
217 	ATL_XSTATS_FIELD(mbrc),
218 	ATL_XSTATS_FIELD(mbtc),
219 	ATL_XSTATS_FIELD(bbrc),
220 	ATL_XSTATS_FIELD(bbtc),
221 };
222 
223 static const struct eth_dev_ops atl_eth_dev_ops = {
224 	.dev_configure	      = atl_dev_configure,
225 	.dev_start	      = atl_dev_start,
226 	.dev_stop	      = atl_dev_stop,
227 	.dev_set_link_up      = atl_dev_set_link_up,
228 	.dev_set_link_down    = atl_dev_set_link_down,
229 	.dev_close	      = atl_dev_close,
230 	.dev_reset	      = atl_dev_reset,
231 
232 	/* PROMISC */
233 	.promiscuous_enable   = atl_dev_promiscuous_enable,
234 	.promiscuous_disable  = atl_dev_promiscuous_disable,
235 	.allmulticast_enable  = atl_dev_allmulticast_enable,
236 	.allmulticast_disable = atl_dev_allmulticast_disable,
237 
238 	/* Link */
239 	.link_update	      = atl_dev_link_update,
240 
241 	.get_reg              = atl_dev_get_regs,
242 
243 	/* Stats */
244 	.stats_get	      = atl_dev_stats_get,
245 	.xstats_get	      = atl_dev_xstats_get,
246 	.xstats_get_names     = atl_dev_xstats_get_names,
247 	.stats_reset	      = atl_dev_stats_reset,
248 	.xstats_reset	      = atl_dev_stats_reset,
249 
250 	.fw_version_get       = atl_fw_version_get,
251 	.dev_infos_get	      = atl_dev_info_get,
252 	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
253 
254 	.mtu_set              = atl_dev_mtu_set,
255 
256 	/* VLAN */
257 	.vlan_filter_set      = atl_vlan_filter_set,
258 	.vlan_offload_set     = atl_vlan_offload_set,
259 	.vlan_tpid_set        = atl_vlan_tpid_set,
260 	.vlan_strip_queue_set = atl_vlan_strip_queue_set,
261 
262 	/* Queue Control */
263 	.rx_queue_start	      = atl_rx_queue_start,
264 	.rx_queue_stop	      = atl_rx_queue_stop,
265 	.rx_queue_setup       = atl_rx_queue_setup,
266 	.rx_queue_release     = atl_rx_queue_release,
267 
268 	.tx_queue_start	      = atl_tx_queue_start,
269 	.tx_queue_stop	      = atl_tx_queue_stop,
270 	.tx_queue_setup       = atl_tx_queue_setup,
271 	.tx_queue_release     = atl_tx_queue_release,
272 
273 	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
274 	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
275 
276 	.rx_queue_count       = atl_rx_queue_count,
277 	.rx_descriptor_status = atl_dev_rx_descriptor_status,
278 	.tx_descriptor_status = atl_dev_tx_descriptor_status,
279 
280 	/* EEPROM */
281 	.get_eeprom_length    = atl_dev_get_eeprom_length,
282 	.get_eeprom           = atl_dev_get_eeprom,
283 	.set_eeprom           = atl_dev_set_eeprom,
284 
285 	/* Flow Control */
286 	.flow_ctrl_get	      = atl_flow_ctrl_get,
287 	.flow_ctrl_set	      = atl_flow_ctrl_set,
288 
289 	/* MAC */
290 	.mac_addr_add	      = atl_add_mac_addr,
291 	.mac_addr_remove      = atl_remove_mac_addr,
292 	.mac_addr_set	      = atl_set_default_mac_addr,
293 	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
294 	.rxq_info_get	      = atl_rxq_info_get,
295 	.txq_info_get	      = atl_txq_info_get,
296 
297 	.reta_update          = atl_reta_update,
298 	.reta_query           = atl_reta_query,
299 	.rss_hash_update      = atl_rss_hash_update,
300 	.rss_hash_conf_get    = atl_rss_hash_conf_get,
301 };
302 
303 static inline int32_t
304 atl_reset_hw(struct aq_hw_s *hw)
305 {
306 	return hw_atl_b0_hw_reset(hw);
307 }
308 
309 static inline void
310 atl_enable_intr(struct rte_eth_dev *dev)
311 {
312 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
313 
314 	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
315 }
316 
317 static void
318 atl_disable_intr(struct aq_hw_s *hw)
319 {
320 	PMD_INIT_FUNC_TRACE();
321 	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
322 }
323 
324 static int
325 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
326 {
327 	struct atl_adapter *adapter =
328 		(struct atl_adapter *)eth_dev->data->dev_private;
329 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
330 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
331 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
332 	int err = 0;
333 
334 	PMD_INIT_FUNC_TRACE();
335 
336 	eth_dev->dev_ops = &atl_eth_dev_ops;
337 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
338 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
339 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
340 
341 	/* For secondary processes, the primary process has done all the work */
342 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
343 		return 0;
344 
345 	/* Vendor and Device ID need to be set before init of shared code */
346 	hw->device_id = pci_dev->id.device_id;
347 	hw->vendor_id = pci_dev->id.vendor_id;
348 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
349 
	/* Hardware configuration - hardcoded defaults */
351 	adapter->hw_cfg.is_lro = false;
352 	adapter->hw_cfg.wol = false;
353 	adapter->hw_cfg.is_rss = false;
354 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
355 
356 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
357 			  AQ_NIC_RATE_5G |
358 			  AQ_NIC_RATE_2G5 |
359 			  AQ_NIC_RATE_1G |
360 			  AQ_NIC_RATE_100M;
361 
362 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
363 	adapter->hw_cfg.aq_rss.indirection_table_size =
364 		HW_ATL_B0_RSS_REDIRECTION_MAX;
365 
366 	hw->aq_nic_cfg = &adapter->hw_cfg;
367 
368 	/* disable interrupt */
369 	atl_disable_intr(hw);
370 
371 	/* Allocate memory for storing MAC addresses */
372 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
373 	if (eth_dev->data->mac_addrs == NULL) {
374 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
375 		return -ENOMEM;
376 	}
377 
	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err)
		goto err_free_mac;

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0) {
		err = -EINVAL;
		goto err_free_mac;
	}
386 
387 	/* Reset the hw statistics */
388 	atl_dev_stats_reset(eth_dev);
389 
390 	rte_intr_callback_register(intr_handle,
391 				   atl_dev_interrupt_handler, eth_dev);
392 
393 	/* enable uio/vfio intr/eventfd mapping */
394 	rte_intr_enable(intr_handle);
395 
396 	/* enable support intr */
397 	atl_enable_intr(eth_dev);
398 
	return 0;

err_free_mac:
	/* Release the MAC address storage on failed initialization */
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return err;
}
401 
402 static int
403 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
404 {
405 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
406 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
407 	struct aq_hw_s *hw;
408 
409 	PMD_INIT_FUNC_TRACE();
410 
411 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
412 		return -EPERM;
413 
414 	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
415 
416 	if (hw->adapter_stopped == 0)
417 		atl_dev_close(eth_dev);
418 
419 	eth_dev->dev_ops = NULL;
420 	eth_dev->rx_pkt_burst = NULL;
421 	eth_dev->tx_pkt_burst = NULL;
422 
423 	/* disable uio intr before callback unregister */
424 	rte_intr_disable(intr_handle);
425 	rte_intr_callback_unregister(intr_handle,
426 				     atl_dev_interrupt_handler, eth_dev);
427 
428 	rte_free(eth_dev->data->mac_addrs);
429 	eth_dev->data->mac_addrs = NULL;
430 
431 	return 0;
432 }
433 
434 static int
435 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
436 	struct rte_pci_device *pci_dev)
437 {
438 	return rte_eth_dev_pci_generic_probe(pci_dev,
439 		sizeof(struct atl_adapter), eth_atl_dev_init);
440 }
441 
442 static int
443 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
444 {
445 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
446 }
447 
448 static int
449 atl_dev_configure(struct rte_eth_dev *dev)
450 {
451 	struct atl_interrupt *intr =
452 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
453 
454 	PMD_INIT_FUNC_TRACE();
455 
456 	/* set flag to update link status after init */
457 	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
458 
459 	return 0;
460 }
461 
462 /*
463  * Configure device link speed and setup link.
464  * It returns 0 on success.
465  */
466 static int
467 atl_dev_start(struct rte_eth_dev *dev)
468 {
469 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
470 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
471 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
472 	uint32_t intr_vector = 0;
473 	int status;
474 	int err;
475 
476 	PMD_INIT_FUNC_TRACE();
477 
478 	/* set adapter started */
479 	hw->adapter_stopped = 0;
480 
481 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
482 		PMD_INIT_LOG(ERR,
483 		"Invalid link_speeds for port %u, fix speed not supported",
484 				dev->data->port_id);
485 		return -EINVAL;
486 	}
487 
488 	/* disable uio/vfio intr/eventfd mapping */
489 	rte_intr_disable(intr_handle);
490 
	/* Reinitialize the adapter
	 * (this performs a reset followed by start)
	 */
494 	status = atl_reset_hw(hw);
495 	if (status != 0)
496 		return -EIO;
497 
	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
	if (err != 0)
		return err;

	hw_atl_b0_hw_start(hw);
501 	/* check and configure queue intr-vector mapping */
502 	if ((rte_intr_cap_multiple(intr_handle) ||
503 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
504 	    dev->data->dev_conf.intr_conf.rxq != 0) {
505 		intr_vector = dev->data->nb_rx_queues;
506 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
507 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
508 					ATL_MAX_INTR_QUEUE_NUM);
509 			return -ENOTSUP;
510 		}
511 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
512 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
513 			return -1;
514 		}
515 	}
516 
517 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
518 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
519 				    dev->data->nb_rx_queues * sizeof(int), 0);
520 		if (intr_handle->intr_vec == NULL) {
521 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
522 				     " intr_vec", dev->data->nb_rx_queues);
523 			return -ENOMEM;
524 		}
525 	}
526 
527 	/* initialize transmission unit */
528 	atl_tx_init(dev);
529 
530 	/* This can fail when allocating mbufs for descriptor rings */
531 	err = atl_rx_init(dev);
532 	if (err) {
533 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
534 		goto error;
535 	}
536 
537 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
538 		hw->fw_ver_actual >> 24,
539 		(hw->fw_ver_actual >> 16) & 0xFF,
540 		hw->fw_ver_actual & 0xFFFF);
541 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
542 
543 	err = atl_start_queues(dev);
544 	if (err < 0) {
545 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
546 		goto error;
547 	}
548 
	err = atl_dev_set_link_up(dev);
	if (err)
		goto error;

	err = hw->aq_fw_ops->update_link_status(hw);
	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
560 
561 	if (rte_intr_allow_others(intr_handle)) {
562 		/* check if lsc interrupt is enabled */
563 		if (dev->data->dev_conf.intr_conf.lsc != 0)
564 			atl_dev_lsc_interrupt_setup(dev, true);
565 		else
566 			atl_dev_lsc_interrupt_setup(dev, false);
567 	} else {
568 		rte_intr_callback_unregister(intr_handle,
569 					     atl_dev_interrupt_handler, dev);
570 		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "LSC not enabled because"
				     " interrupt multiplexing is unavailable");
573 	}
574 
575 	/* check if rxq interrupt is enabled */
576 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
577 	    rte_intr_dp_is_en(intr_handle))
578 		atl_dev_rxq_interrupt_setup(dev);
579 
580 	/* enable uio/vfio intr/eventfd mapping */
581 	rte_intr_enable(intr_handle);
582 
583 	/* resume enabled intr since hw reset */
584 	atl_enable_intr(dev);
585 
586 	return 0;
587 
588 error:
589 	atl_stop_queues(dev);
590 	return -EIO;
591 }
592 
593 /*
594  * Stop device: disable rx and tx functions to allow for reconfiguring.
595  */
596 static void
597 atl_dev_stop(struct rte_eth_dev *dev)
598 {
599 	struct rte_eth_link link;
600 	struct aq_hw_s *hw =
601 		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
602 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
603 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
604 
605 	PMD_INIT_FUNC_TRACE();
606 
607 	/* disable interrupts */
608 	atl_disable_intr(hw);
609 
610 	/* reset the NIC */
611 	atl_reset_hw(hw);
612 	hw->adapter_stopped = 1;
613 
614 	atl_stop_queues(dev);
615 
616 	/* Clear stored conf */
617 	dev->data->scattered_rx = 0;
618 	dev->data->lro = 0;
619 
620 	/* Clear recorded link status */
621 	memset(&link, 0, sizeof(link));
622 	rte_eth_linkstatus_set(dev, &link);
623 
624 	if (!rte_intr_allow_others(intr_handle))
625 		/* resume to the default handler */
626 		rte_intr_callback_register(intr_handle,
627 					   atl_dev_interrupt_handler,
628 					   (void *)dev);
629 
630 	/* Clean datapath event and queue/vec mapping */
631 	rte_intr_efd_disable(intr_handle);
632 	if (intr_handle->intr_vec != NULL) {
633 		rte_free(intr_handle->intr_vec);
634 		intr_handle->intr_vec = NULL;
635 	}
636 }
637 
638 /*
 * Set device link up: advertise the configured link speeds.
640  */
641 static int
642 atl_dev_set_link_up(struct rte_eth_dev *dev)
643 {
644 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
645 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
646 	uint32_t speed_mask = 0;
647 
648 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
649 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
650 	} else {
651 		if (link_speeds & ETH_LINK_SPEED_10G)
652 			speed_mask |= AQ_NIC_RATE_10G;
653 		if (link_speeds & ETH_LINK_SPEED_5G)
654 			speed_mask |= AQ_NIC_RATE_5G;
655 		if (link_speeds & ETH_LINK_SPEED_1G)
656 			speed_mask |= AQ_NIC_RATE_1G;
657 		if (link_speeds & ETH_LINK_SPEED_2_5G)
658 			speed_mask |=  AQ_NIC_RATE_2G5;
659 		if (link_speeds & ETH_LINK_SPEED_100M)
660 			speed_mask |= AQ_NIC_RATE_100M;
661 	}
662 
663 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
664 }
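
/*
 * Usage sketch (application side, illustrative only): the advertised
 * rates are selected through rte_eth_conf before the port is started;
 * fixed-speed mode is rejected by atl_dev_start().
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_dev_start(port_id);
 */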
665 
666 /*
 * Set device link down: stop advertising any link speed (mask = 0).
668  */
669 static int
670 atl_dev_set_link_down(struct rte_eth_dev *dev)
671 {
672 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
673 
674 	return hw->aq_fw_ops->set_link_speed(hw, 0);
675 }
676 
677 /*
678  * Reset and stop device.
679  */
680 static void
681 atl_dev_close(struct rte_eth_dev *dev)
682 {
683 	PMD_INIT_FUNC_TRACE();
684 
685 	atl_dev_stop(dev);
686 
687 	atl_free_queues(dev);
688 }
689 
690 static int
691 atl_dev_reset(struct rte_eth_dev *dev)
692 {
693 	int ret;
694 
695 	ret = eth_atl_dev_uninit(dev);
696 	if (ret)
697 		return ret;
698 
699 	ret = eth_atl_dev_init(dev);
700 
701 	return ret;
702 }
703 
704 static int
705 atl_dev_configure_macsec(struct rte_eth_dev *dev)
706 {
707 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
708 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
709 	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
710 	struct macsec_msg_fw_request msg_macsec;
711 	struct macsec_msg_fw_response response;
712 
713 	if (!aqcfg->common.macsec_enabled ||
714 	    hw->aq_fw_ops->send_macsec_req == NULL)
715 		return 0;
716 
717 	memset(&msg_macsec, 0, sizeof(msg_macsec));
718 
	/* Create the set of SC/SA structures from the parameters provided by DPDK */
720 
721 	/* Configure macsec */
722 	msg_macsec.msg_type = macsec_cfg_msg;
723 	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
724 	msg_macsec.cfg.interrupts_enabled = 1;
725 
726 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
727 
728 	if (response.result)
729 		return -1;
730 
731 	memset(&msg_macsec, 0, sizeof(msg_macsec));
732 
733 	/* Configure TX SC */
734 
735 	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* only a single TX SC is supported */
737 	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
738 
739 	/* MAC addr for TX */
740 	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
741 	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
742 	msg_macsec.txsc.sa_mask = 0x3f;
743 
744 	msg_macsec.txsc.da_mask = 0;
745 	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */
747 
	/*
	 * Create the SCI (Secure Channel Identifier).
	 * The SCI is constructed from the source MAC address
	 * and a 16-bit port identifier.
	 */
752 	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
753 			       (msg_macsec.txsc.mac_sa[0] >> 16);
754 	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
755 
756 	uint32_t port_identifier = 1;
757 
758 	msg_macsec.txsc.sci[1] = sci_hi_part;
759 	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
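
	/*
	 * Worked example (assuming a little-endian host): per IEEE 802.1AE
	 * the SCI is the 48-bit MAC address followed by the 16-bit port
	 * identifier, so for MAC 00:11:22:33:44:55 and port identifier 1
	 * the code above yields sci[1] = 0x00112233 and
	 * sci[0] = 0x44550001.
	 */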
760 
761 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
762 
763 	if (response.result)
764 		return -1;
765 
766 	memset(&msg_macsec, 0, sizeof(msg_macsec));
767 
768 	/* Configure RX SC */
769 
770 	msg_macsec.msg_type = macsec_add_rx_sc_msg;
771 	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
772 	msg_macsec.rxsc.replay_protect =
773 		aqcfg->common.replay_protection_enabled;
774 	msg_macsec.rxsc.anti_replay_window = 0;
775 
776 	/* MAC addr for RX */
777 	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
778 	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0;
780 
781 	msg_macsec.rxsc.sa_mask = 0;
782 
783 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
784 
785 	if (response.result)
786 		return -1;
787 
788 	memset(&msg_macsec, 0, sizeof(msg_macsec));
789 
	/* Configure TX SA */
791 
792 	msg_macsec.msg_type = macsec_add_tx_sa_msg;
793 	msg_macsec.txsa.index = aqcfg->txsa.idx;
794 	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
795 
796 	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
797 	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
798 	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
799 	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
800 
801 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
802 
803 	if (response.result)
804 		return -1;
805 
806 	memset(&msg_macsec, 0, sizeof(msg_macsec));
807 
808 	/* Configure RX SA */
809 
810 	msg_macsec.msg_type = macsec_add_rx_sa_msg;
811 	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
812 	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
813 
814 	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
815 	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
816 	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
817 	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
818 
819 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
820 
821 	if (response.result)
822 		return -1;
823 
824 	return 0;
825 }
826 
827 int atl_macsec_enable(struct rte_eth_dev *dev,
828 		      uint8_t encr, uint8_t repl_prot)
829 {
830 	struct aq_hw_cfg_s *cfg =
831 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
832 
833 	cfg->aq_macsec.common.macsec_enabled = 1;
834 	cfg->aq_macsec.common.encryption_enabled = encr;
835 	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
836 
837 	return 0;
838 }
839 
840 int atl_macsec_disable(struct rte_eth_dev *dev)
841 {
842 	struct aq_hw_cfg_s *cfg =
843 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
844 
845 	cfg->aq_macsec.common.macsec_enabled = 0;
846 
847 	return 0;
848 }
849 
850 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
851 {
852 	struct aq_hw_cfg_s *cfg =
853 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
854 
855 	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
856 	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, ETHER_ADDR_LEN);
857 
858 	return 0;
859 }
860 
861 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
862 			   uint8_t *mac, uint16_t pi)
863 {
864 	struct aq_hw_cfg_s *cfg =
865 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
866 
867 	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
868 	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, ETHER_ADDR_LEN);
869 	cfg->aq_macsec.rxsc.pi = pi;
870 
871 	return 0;
872 }
873 
874 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
875 			   uint8_t idx, uint8_t an,
876 			   uint32_t pn, uint8_t *key)
877 {
878 	struct aq_hw_cfg_s *cfg =
879 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
880 
881 	cfg->aq_macsec.txsa.idx = idx;
882 	cfg->aq_macsec.txsa.pn = pn;
883 	cfg->aq_macsec.txsa.an = an;
884 
885 	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
886 	return 0;
887 }
888 
889 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
890 			   uint8_t idx, uint8_t an,
891 			   uint32_t pn, uint8_t *key)
892 {
893 	struct aq_hw_cfg_s *cfg =
894 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
895 
896 	cfg->aq_macsec.rxsa.idx = idx;
897 	cfg->aq_macsec.rxsa.pn = pn;
898 	cfg->aq_macsec.rxsa.an = an;
899 
900 	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
901 	return 0;
902 }
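
/*
 * Illustrative call order for the MACsec helpers above (a sketch, not a
 * complete configuration; own_mac, peer_mac and key are placeholders):
 * enable the feature, describe the TX/RX secure channels, then select
 * the SAs. The settings are pushed to firmware later, from
 * atl_dev_configure_macsec().
 *
 *	atl_macsec_enable(dev, 1, 1);
 *	atl_macsec_config_txsc(dev, own_mac);
 *	atl_macsec_config_rxsc(dev, peer_mac, 1);
 *	atl_macsec_select_txsa(dev, 0, 0, 1, key);
 *	atl_macsec_select_rxsa(dev, 0, 0, 1, key);
 */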
903 
904 static int
905 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
906 {
907 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
908 	struct aq_hw_s *hw = &adapter->hw;
909 	struct atl_sw_stats *swstats = &adapter->sw_stats;
910 	unsigned int i;
911 
912 	hw->aq_fw_ops->update_stats(hw);
913 
914 	/* Fill out the rte_eth_stats statistics structure */
915 	stats->ipackets = hw->curr_stats.dma_pkt_rc;
916 	stats->ibytes = hw->curr_stats.dma_oct_rc;
917 	stats->imissed = hw->curr_stats.dpc;
918 	stats->ierrors = hw->curr_stats.erpt;
919 
920 	stats->opackets = hw->curr_stats.dma_pkt_tc;
921 	stats->obytes = hw->curr_stats.dma_oct_tc;
922 	stats->oerrors = 0;
923 
924 	stats->rx_nombuf = swstats->rx_nombuf;
925 
926 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
927 		stats->q_ipackets[i] = swstats->q_ipackets[i];
928 		stats->q_opackets[i] = swstats->q_opackets[i];
929 		stats->q_ibytes[i] = swstats->q_ibytes[i];
930 		stats->q_obytes[i] = swstats->q_obytes[i];
931 		stats->q_errors[i] = swstats->q_errors[i];
932 	}
933 	return 0;
934 }
935 
936 static void
937 atl_dev_stats_reset(struct rte_eth_dev *dev)
938 {
939 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
940 	struct aq_hw_s *hw = &adapter->hw;
941 
942 	hw->aq_fw_ops->update_stats(hw);
943 
	/* Reset the hardware statistics snapshot and software totals */
945 	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
946 
947 	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
948 }
949 
950 static int
951 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
952 			 struct rte_eth_xstat_name *xstats_names,
953 			 unsigned int size)
954 {
955 	unsigned int i;
956 
957 	if (!xstats_names)
958 		return RTE_DIM(atl_xstats_tbl);
959 
960 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
961 		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
962 			RTE_ETH_XSTATS_NAME_SIZE);
963 
964 	return i;
965 }
966 
967 static int
968 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
969 		   unsigned int n)
970 {
971 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
972 	struct aq_hw_s *hw = &adapter->hw;
973 	unsigned int i;
974 
975 	if (!stats)
976 		return 0;
977 
978 	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
979 		stats[i].id = i;
980 		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
981 					atl_xstats_tbl[i].offset);
982 	}
983 
984 	return i;
985 }
986 
987 static int
988 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
989 {
990 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
991 	uint32_t fw_ver = 0;
992 	unsigned int ret = 0;
993 
994 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
995 	if (ret)
996 		return -EIO;
997 
998 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
999 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1000 
	ret += 1; /* account for the string null-terminator */
1002 
1003 	if (fw_size < ret)
1004 		return ret;
1005 
1006 	return 0;
1007 }
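
/*
 * Illustrative decoding: the 32-bit firmware version packs
 * major.minor.build as 8/8/16 bits, so fw_ver = 0x0301002A is printed
 * as "3.1.42" (0x03 -> 3, 0x01 -> 1, 0x002A -> 42).
 */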
1008 
1009 static void
1010 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1011 {
1012 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1013 
1014 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1015 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1016 
1017 	dev_info->min_rx_bufsize = 1024;
1018 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1019 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1020 	dev_info->max_vfs = pci_dev->max_vfs;
1021 
1022 	dev_info->max_hash_mac_addrs = 0;
1023 	dev_info->max_vmdq_pools = 0;
1024 	dev_info->vmdq_queue_num = 0;
1025 
1026 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1027 
1028 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1029 
1030 
1031 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1032 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1033 	};
1034 
1035 	dev_info->default_txconf = (struct rte_eth_txconf) {
1036 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1037 	};
1038 
1039 	dev_info->rx_desc_lim = rx_desc_lim;
1040 	dev_info->tx_desc_lim = tx_desc_lim;
1041 
1042 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1043 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1044 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1045 
1046 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1047 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1048 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1049 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
1050 }
1051 
1052 static const uint32_t *
1053 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1054 {
1055 	static const uint32_t ptypes[] = {
1056 		RTE_PTYPE_L2_ETHER,
1057 		RTE_PTYPE_L2_ETHER_ARP,
1058 		RTE_PTYPE_L2_ETHER_VLAN,
1059 		RTE_PTYPE_L3_IPV4,
1060 		RTE_PTYPE_L3_IPV6,
1061 		RTE_PTYPE_L4_TCP,
1062 		RTE_PTYPE_L4_UDP,
1063 		RTE_PTYPE_L4_SCTP,
1064 		RTE_PTYPE_L4_ICMP,
1065 		RTE_PTYPE_UNKNOWN
1066 	};
1067 
1068 	if (dev->rx_pkt_burst == atl_recv_pkts)
1069 		return ptypes;
1070 
1071 	return NULL;
1072 }
1073 
1074 static void
1075 atl_dev_delayed_handler(void *param)
1076 {
1077 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1078 
1079 	atl_dev_configure_macsec(dev);
1080 }
1081 
1082 
1083 /* return 0 means link status changed, -1 means not changed */
1084 static int
1085 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
1086 {
1087 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1088 	struct rte_eth_link link, old;
1089 	int err = 0;
1090 
1091 	link.link_status = ETH_LINK_DOWN;
1092 	link.link_speed = 0;
1093 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1094 	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1095 	memset(&old, 0, sizeof(old));
1096 
1097 	/* load old link status */
1098 	rte_eth_linkstatus_get(dev, &old);
1099 
1100 	/* read current link status */
1101 	err = hw->aq_fw_ops->update_link_status(hw);
1102 
1103 	if (err)
1104 		return 0;
1105 
1106 	if (hw->aq_link_status.mbps == 0) {
1107 		/* write default (down) link status */
1108 		rte_eth_linkstatus_set(dev, &link);
1109 		if (link.link_status == old.link_status)
1110 			return -1;
1111 		return 0;
1112 	}
1113 
1114 	link.link_status = ETH_LINK_UP;
1115 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1116 	link.link_speed = hw->aq_link_status.mbps;
1117 
1118 	rte_eth_linkstatus_set(dev, &link);
1119 
1120 	if (link.link_status == old.link_status)
1121 		return -1;
1122 
1123 	if (rte_eal_alarm_set(1000 * 1000,
1124 			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set failed");
1126 
1127 	return 0;
1128 }
1129 
1130 static void
1131 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1132 {
1133 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1134 
1135 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1136 }
1137 
1138 static void
1139 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1140 {
1141 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1142 
1143 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1144 }
1145 
1146 static void
1147 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1148 {
1149 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1150 
1151 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1152 }
1153 
1154 static void
1155 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1156 {
1157 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1158 
1159 	if (dev->data->promiscuous == 1)
1160 		return; /* must remain in all_multicast mode */
1161 
1162 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1163 }
1164 
1165 /**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once, during NIC initialization.
1168  *
1169  * @param dev
1170  *  Pointer to struct rte_eth_dev.
1171  * @param on
1172  *  Enable or Disable.
1173  *
1174  * @return
1175  *  - On success, zero.
1176  *  - On failure, a negative value.
1177  */
1178 
1179 static int
1180 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
1181 {
1182 	atl_dev_link_status_print(dev);
1183 	return 0;
1184 }
1185 
1186 static int
1187 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
1188 {
1189 	return 0;
1190 }
1191 
1192 
1193 static int
1194 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1195 {
1196 	struct atl_interrupt *intr =
1197 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1198 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1199 	u64 cause = 0;
1200 
1201 	hw_atl_b0_hw_irq_read(hw, &cause);
1202 
1203 	atl_disable_intr(hw);
1204 
1205 	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
1206 		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
1207 
1208 	return 0;
1209 }
1210 
1211 /**
1212  * It gets and then prints the link status.
1213  *
1214  * @param dev
1215  *  Pointer to struct rte_eth_dev.
1220  */
1221 static void
1222 atl_dev_link_status_print(struct rte_eth_dev *dev)
1223 {
1224 	struct rte_eth_link link;
1225 
1226 	memset(&link, 0, sizeof(link));
1227 	rte_eth_linkstatus_get(dev, &link);
1228 	if (link.link_status) {
1229 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1230 					(int)(dev->data->port_id),
1231 					(unsigned int)link.link_speed,
1232 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1233 					"full-duplex" : "half-duplex");
1234 	} else {
		PMD_DRV_LOG(INFO, "Port %d: Link Down",
1236 				(int)(dev->data->port_id));
1237 	}
1238 
1239 
1240 #ifdef DEBUG
1241 {
1242 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1243 
1244 	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1245 				pci_dev->addr.domain,
1246 				pci_dev->addr.bus,
1247 				pci_dev->addr.devid,
1248 				pci_dev->addr.function);
1249 }
1250 #endif
1251 
	PMD_DRV_LOG(INFO, "Link speed: %u Mbps", link.link_speed);
1253 }
1254 
1255 /*
 * It executes link_update after an interrupt has occurred.
1257  *
1258  * @param dev
1259  *  Pointer to struct rte_eth_dev.
1260  *
1261  * @return
1262  *  - On success, zero.
1263  *  - On failure, a negative value.
1264  */
1265 static int
1266 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1267 			   struct rte_intr_handle *intr_handle)
1268 {
1269 	struct atl_interrupt *intr =
1270 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1271 	struct atl_adapter *adapter =
1272 		(struct atl_adapter *)dev->data->dev_private;
1273 	struct aq_hw_s *hw = &adapter->hw;
1274 
1275 	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
1276 		goto done;
1277 
1278 	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1279 
	/* Notify the user application if the link status changed */
1281 	if (!atl_dev_link_update(dev, 0)) {
1282 		atl_dev_link_status_print(dev);
1283 		_rte_eth_dev_callback_process(dev,
1284 			RTE_ETH_EVENT_INTR_LSC, NULL);
1285 	} else {
1286 		if (hw->aq_fw_ops->send_macsec_req == NULL)
1287 			goto done;
1288 
1289 		/* Check macsec Keys expired */
1290 		struct get_stats req = { 0 };
1291 		struct macsec_msg_fw_request msg = { 0 };
1292 		struct macsec_msg_fw_response resp = { 0 };
1293 
1294 		req.ingress_sa_index = 0x0;
1295 		req.egress_sc_index = 0x0;
1296 		req.egress_sa_index = 0x0;
1297 		msg.msg_type = macsec_get_stats_msg;
1298 		msg.stats = req;
1299 
1300 		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1301 		if (err) {
1302 			PMD_DRV_LOG(ERR, "send_macsec_req fail");
1303 			goto done;
1304 		}
1305 		if (resp.stats.egress_threshold_expired ||
1306 		    resp.stats.ingress_threshold_expired ||
1307 		    resp.stats.egress_expired ||
1308 		    resp.stats.ingress_expired) {
1309 			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
1310 			_rte_eth_dev_callback_process(dev,
1311 				RTE_ETH_EVENT_MACSEC, NULL);
1312 		}
1313 	}
1314 done:
1315 	atl_enable_intr(dev);
1316 	rte_intr_enable(intr_handle);
1317 
1318 	return 0;
1319 }
1320 
1321 /**
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt.
1324  *
1325  * @param handle
1326  *  Pointer to interrupt handle.
1327  * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
1329  *
1330  * @return
1331  *  void
1332  */
1333 static void
1334 atl_dev_interrupt_handler(void *param)
1335 {
1336 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1337 
1338 	atl_dev_interrupt_get_status(dev);
1339 	atl_dev_interrupt_action(dev, dev->intr_handle);
1340 }
1341 
1342 #define SFP_EEPROM_SIZE 0xff
1343 
1344 static int
1345 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1346 {
1347 	return SFP_EEPROM_SIZE;
1348 }
1349 
1350 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1351 		       struct rte_dev_eeprom_info *eeprom)
1352 {
1353 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1354 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1355 
1356 	if (hw->aq_fw_ops->get_eeprom == NULL)
1357 		return -ENOTSUP;
1358 
1359 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1360 	    eeprom->data == NULL)
1361 		return -EINVAL;
1362 
	/* A non-zero magic value overrides the default SMBus device address */
	if (eeprom->magic)
		dev_addr = eeprom->magic;
1365 
1366 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1367 					 eeprom->length, eeprom->offset);
1368 }
1369 
1370 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1371 		       struct rte_dev_eeprom_info *eeprom)
1372 {
1373 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1374 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1375 
1376 	if (hw->aq_fw_ops->set_eeprom == NULL)
1377 		return -ENOTSUP;
1378 
1379 	if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1380 		return -EINVAL;
1381 
	/* A non-zero magic value overrides the default SMBus device address */
	if (eeprom->magic)
		dev_addr = eeprom->magic;
1384 
1385 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr,
1386 					 eeprom->data, eeprom->length);
1387 }
1388 
1389 static int
1390 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1391 {
1392 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1393 	u32 mif_id;
1394 	int err;
1395 
1396 	if (regs->data == NULL) {
1397 		regs->length = hw_atl_utils_hw_get_reg_length();
1398 		regs->width = sizeof(u32);
1399 		return 0;
1400 	}
1401 
1402 	/* Only full register dump is supported */
1403 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1404 		return -ENOTSUP;
1405 
1406 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1407 
1408 	/* Device version */
1409 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1410 	regs->version = mif_id & 0xFFU;
1411 
1412 	return err;
1413 }
1414 
1415 static int
1416 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1417 {
1418 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1419 
	if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_FC_NONE;
	else if ((hw->aq_nic_cfg->flow_control &
		 (AQ_NIC_FC_RX | AQ_NIC_FC_TX)) ==
		 (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
		fc_conf->mode = RTE_FC_FULL;
	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_FC_TX_PAUSE;
1428 
1429 	return 0;
1430 }
1431 
1432 static int
1433 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1434 {
1435 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1436 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1437 
1438 
1439 	if (hw->aq_fw_ops->set_flow_control == NULL)
1440 		return -ENOTSUP;
1441 
1442 	if (fc_conf->mode == RTE_FC_NONE)
1443 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1444 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1445 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1446 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1447 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1448 	else if (fc_conf->mode == RTE_FC_FULL)
1449 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1450 
1451 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1452 		return hw->aq_fw_ops->set_flow_control(hw);
1453 
1454 	return 0;
1455 }
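
/*
 * Usage sketch (application side, illustrative only): request full
 * RX + TX pause-frame flow control. Note that this driver only honours
 * fc_conf->mode; the watermark fields are ignored.
 *
 *	struct rte_eth_fc_conf fc_conf = { 0 };
 *
 *	fc_conf.mode = RTE_FC_FULL;
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */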
1456 
1457 static int
1458 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1459 		    u8 *mac_addr, bool enable)
1460 {
1461 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1462 	unsigned int h = 0U;
1463 	unsigned int l = 0U;
1464 	int err;
1465 
1466 	if (mac_addr) {
1467 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1468 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1469 			(mac_addr[4] << 8) | mac_addr[5];
1470 	}
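
	/*
	 * Example: for MAC 00:11:22:33:44:55 this yields h = 0x0011 (the
	 * two most significant bytes) and l = 0x22334455 (the four least
	 * significant bytes), matching the MSW/LSW filter registers
	 * programmed below.
	 */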
1471 
1472 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1473 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1474 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1475 
1476 	if (enable)
1477 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1478 
1479 	err = aq_hw_err_from_flags(hw);
1480 
1481 	return err;
1482 }
1483 
1484 static int
1485 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1486 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1487 {
1488 	if (is_zero_ether_addr(mac_addr)) {
1489 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1490 		return -EINVAL;
1491 	}
1492 
1493 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1494 }
1495 
1496 static void
1497 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1498 {
1499 	atl_update_mac_addr(dev, index, NULL, false);
1500 }
1501 
1502 static int
1503 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
1504 {
1505 	atl_remove_mac_addr(dev, 0);
1506 	atl_add_mac_addr(dev, addr, 0, 0);
1507 	return 0;
1508 }
1509 
1510 static int
1511 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1512 {
1513 	struct rte_eth_dev_info dev_info;
1514 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1515 
1516 	atl_dev_info_get(dev, &dev_info);
1517 
1518 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1519 		return -EINVAL;
1520 
1521 	/* update max frame size */
1522 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1523 
1524 	return 0;
1525 }
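
/*
 * Worked example: for the standard MTU of 1500 the frame size is
 * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518 bytes, well
 * within max_rx_pktlen (HW_ATL_B0_MTU_JUMBO).
 */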
1526 
1527 static int
1528 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1529 {
1530 	struct aq_hw_cfg_s *cfg =
1531 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1532 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1533 	int err = 0;
1534 	int i = 0;
1535 
1536 	PMD_INIT_FUNC_TRACE();
1537 
1538 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1539 		if (cfg->vlan_filter[i] == vlan_id) {
1540 			if (!on) {
1541 				/* Disable VLAN filter. */
1542 				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1543 
1544 				/* Clear VLAN filter entry */
1545 				cfg->vlan_filter[i] = 0;
1546 			}
1547 			break;
1548 		}
1549 	}
1550 
	/* VLAN_ID was not found, so there is nothing to delete. */
1552 	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1553 		goto exit;
1554 
	/* VLAN_ID already exists, or was removed above. Nothing to do. */
1556 	if (i != HW_ATL_B0_MAX_VLAN_IDS)
1557 		goto exit;
1558 
	/* Try to find a free VLAN filter entry for the new VLAN_ID */
1560 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1561 		if (cfg->vlan_filter[i] == 0)
1562 			break;
1563 	}
1564 
1565 	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter entry to add the new VLAN_ID */
1567 		err = -ENOMEM;
1568 		goto exit;
1569 	}
1570 
1571 	cfg->vlan_filter[i] = vlan_id;
1572 	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1573 	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1574 	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1575 
1576 exit:
	/* Enable VLAN promiscuous mode if the VLAN filter table is empty */
1578 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1579 		if (cfg->vlan_filter[i] != 0)
1580 			break;
1581 	}
1582 
1583 	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1584 
1585 	return err;
1586 }
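
/*
 * Usage sketch (application side, illustrative only): the ethdev API
 * drives the filter table above.
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);	(add VLAN 100)
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);	(remove it again)
 */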
1587 
1588 static int
1589 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1590 {
1591 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1592 	struct aq_hw_cfg_s *cfg =
1593 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1594 	int i;
1595 
1596 	PMD_INIT_FUNC_TRACE();
1597 
1598 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1599 		if (cfg->vlan_filter[i])
1600 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1601 	}
1602 	return 0;
1603 }
1604 
1605 static int
1606 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1607 {
1608 	struct aq_hw_cfg_s *cfg =
1609 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1610 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1611 	int ret = 0;
1612 	int i;
1613 
1614 	PMD_INIT_FUNC_TRACE();
1615 
1616 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1617 
1618 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1619 
1620 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1621 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1622 
1623 	if (mask & ETH_VLAN_EXTEND_MASK)
1624 		ret = -ENOTSUP;
1625 
1626 	return ret;
1627 }
1628 
1629 static int
1630 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1631 		  uint16_t tpid)
1632 {
1633 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1634 	int err = 0;
1635 
1636 	PMD_INIT_FUNC_TRACE();
1637 
1638 	switch (vlan_type) {
1639 	case ETH_VLAN_TYPE_INNER:
1640 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1641 		break;
1642 	case ETH_VLAN_TYPE_OUTER:
1643 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1644 		break;
1645 	default:
1646 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1647 		err = -ENOTSUP;
1648 	}
1649 
1650 	return err;
1651 }
1652 
1653 static void
1654 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1655 {
1656 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1657 
1658 	PMD_INIT_FUNC_TRACE();
1659 
	if (queue_id >= dev->data->nb_rx_queues) {
1661 		PMD_DRV_LOG(ERR, "Invalid queue id");
1662 		return;
1663 	}
1664 
1665 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1666 }
1667 
1668 static int
1669 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1670 			  struct ether_addr *mc_addr_set,
1671 			  uint32_t nb_mc_addr)
1672 {
1673 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1674 	u32 i;
1675 
1676 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1677 		return -EINVAL;
1678 
	/* Update the whole unicast filter table */
1680 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1681 		u8 *mac_addr = NULL;
1682 		u32 l = 0, h = 0;
1683 
1684 		if (i < nb_mc_addr) {
1685 			mac_addr = mc_addr_set[i].addr_bytes;
1686 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1687 				(mac_addr[4] << 8) | mac_addr[5];
1688 			h = (mac_addr[0] << 8) | mac_addr[1];
1689 		}
1690 
1691 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1692 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1693 							HW_ATL_B0_MAC_MIN + i);
1694 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1695 							HW_ATL_B0_MAC_MIN + i);
1696 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1697 					   HW_ATL_B0_MAC_MIN + i);
1698 	}
1699 
1700 	return 0;
1701 }
1702 
1703 static int
1704 atl_reta_update(struct rte_eth_dev *dev,
1705 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1706 		   uint16_t reta_size)
1707 {
1708 	int i;
1709 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1710 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1711 
1712 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1713 		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1714 					dev->data->nb_rx_queues - 1);
1715 
1716 	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1717 	return 0;
1718 }
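
/*
 * Usage sketch (application side, illustrative only, assuming the
 * 64-entry redirection table this driver reports): spread a 4-queue
 * port evenly across the table.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[1];
 *	int i;
 *
 *	reta_conf[0].mask = ~0ULL;
 *	for (i = 0; i < 64; i++)
 *		reta_conf[0].reta[i] = i % 4;
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, 64);
 */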
1719 
1720 static int
1721 atl_reta_query(struct rte_eth_dev *dev,
1722 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1723 		    uint16_t reta_size)
1724 {
1725 	int i;
1726 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1727 
1728 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1729 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1730 	reta_conf->mask = ~0U;
1731 	return 0;
1732 }
1733 
1734 static int
1735 atl_rss_hash_update(struct rte_eth_dev *dev,
1736 				 struct rte_eth_rss_conf *rss_conf)
1737 {
1738 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1739 	struct aq_hw_cfg_s *cfg =
1740 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1741 	static u8 def_rss_key[40] = {
1742 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1743 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1744 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1745 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1746 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1747 	};
1748 
1749 	cfg->is_rss = !!rss_conf->rss_hf;
1750 	if (rss_conf->rss_key) {
1751 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1752 		       rss_conf->rss_key_len);
1753 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1754 	} else {
1755 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1756 		       sizeof(def_rss_key));
1757 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1758 	}
1759 
1760 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1761 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1762 	return 0;
1763 }
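
/*
 * Usage sketch (application side, illustrative only): enable RSS with
 * the driver's default 40-byte key by passing a NULL rss_key.
 *
 *	struct rte_eth_rss_conf rss_conf = { 0 };
 *
 *	rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP;
 *	rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */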
1764 
1765 static int
1766 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1767 				 struct rte_eth_rss_conf *rss_conf)
1768 {
1769 	struct aq_hw_cfg_s *cfg =
1770 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1771 
1772 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1773 	if (rss_conf->rss_key) {
1774 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1775 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1776 		       rss_conf->rss_key_len);
1777 	}
1778 
1779 	return 0;
1780 }
1781 
1782 static bool
1783 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1784 {
1785 	if (strcmp(dev->device->driver->name, drv->driver.name))
1786 		return false;
1787 
1788 	return true;
1789 }
1790 
1791 bool
1792 is_atlantic_supported(struct rte_eth_dev *dev)
1793 {
1794 	return is_device_supported(dev, &rte_atl_pmd);
1795 }
1796 
1797 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1798 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1799 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1800 
1801 RTE_INIT(atl_init_log)
1802 {
1803 	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1804 	if (atl_logtype_init >= 0)
1805 		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1806 	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1807 	if (atl_logtype_driver >= 0)
1808 		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
1809 }
1810 
1811