xref: /dpdk/drivers/net/atlantic/atl_ethdev.c (revision 0ecc27f28d202a3356a8601e6762b601ea822c4c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_alarm.h>
8 
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
12 #include "atl_logs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
16 
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
19 
20 static int  atl_dev_configure(struct rte_eth_dev *dev);
21 static int  atl_dev_start(struct rte_eth_dev *dev);
22 static void atl_dev_stop(struct rte_eth_dev *dev);
23 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
24 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
25 static void atl_dev_close(struct rte_eth_dev *dev);
26 static int  atl_dev_reset(struct rte_eth_dev *dev);
27 static int  atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
28 static int  atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
29 static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
30 static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
31 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
32 
33 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
34 				    struct rte_eth_xstat_name *xstats_names,
35 				    unsigned int size);
36 
37 static int atl_dev_stats_get(struct rte_eth_dev *dev,
38 				struct rte_eth_stats *stats);
39 
40 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
41 			      struct rte_eth_xstat *stats, unsigned int n);
42 
43 static int atl_dev_stats_reset(struct rte_eth_dev *dev);
44 
45 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 			      size_t fw_size);
47 
48 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
49 
50 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
51 
52 /* VLAN stuff */
53 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
54 		uint16_t vlan_id, int on);
55 
56 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
57 
58 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
59 				     uint16_t queue_id, int on);
60 
61 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
62 			     enum rte_vlan_type vlan_type, uint16_t tpid);
63 
64 /* EEPROM */
65 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
66 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
67 			      struct rte_dev_eeprom_info *eeprom);
68 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
69 			      struct rte_dev_eeprom_info *eeprom);
70 
71 /* Regs */
72 static int atl_dev_get_regs(struct rte_eth_dev *dev,
73 			    struct rte_dev_reg_info *regs);
74 
75 /* Flow control */
76 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
77 			       struct rte_eth_fc_conf *fc_conf);
78 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
79 			       struct rte_eth_fc_conf *fc_conf);
80 
81 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
82 
83 /* Interrupts */
84 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
85 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
86 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
87 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
88 				    struct rte_intr_handle *handle);
89 static void atl_dev_interrupt_handler(void *param);
90 
91 
92 static int atl_add_mac_addr(struct rte_eth_dev *dev,
93 			    struct rte_ether_addr *mac_addr,
94 			    uint32_t index, uint32_t pool);
95 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
96 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
97 					   struct rte_ether_addr *mac_addr);
98 
99 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
100 				    struct rte_ether_addr *mc_addr_set,
101 				    uint32_t nb_mc_addr);
102 
103 /* RSS */
104 static int atl_reta_update(struct rte_eth_dev *dev,
105 			     struct rte_eth_rss_reta_entry64 *reta_conf,
106 			     uint16_t reta_size);
107 static int atl_reta_query(struct rte_eth_dev *dev,
108 			    struct rte_eth_rss_reta_entry64 *reta_conf,
109 			    uint16_t reta_size);
110 static int atl_rss_hash_update(struct rte_eth_dev *dev,
111 				 struct rte_eth_rss_conf *rss_conf);
112 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
113 				   struct rte_eth_rss_conf *rss_conf);
114 
115 
116 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
117 	struct rte_pci_device *pci_dev);
118 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
119 
120 static int atl_dev_info_get(struct rte_eth_dev *dev,
121 				struct rte_eth_dev_info *dev_info);
122 
/* Dynamic log type ids used by the PMD logging macros (see atl_logs.h);
 * presumably registered during driver init — confirm against the
 * RTE_INIT/rte_log_register code elsewhere in this file.
 */
int atl_logtype_init;
int atl_logtype_driver;
125 
/*
 * The set of PCI devices this driver supports.
 * Covers the pre-production D10x ids plus the AQC1xx family
 * (base, "S" and "E" variants).
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};
154 
/* PCI driver descriptor: needs BAR mapping and supports link status
 * change (LSC) interrupts.
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
161 
/* Rx offload capabilities advertised by this PMD. */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

/* Tx offload capabilities advertised by this PMD. */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

/* SFP module EEPROM size in bytes (256). */
#define SFP_EEPROM_SIZE 0x100
179 
/* Rx descriptor ring limits reported via dev_infos_get. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

/* Tx descriptor ring limits, including per-packet segment caps. */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
193 
/* Source of an extended statistic: MSM hardware counters read into
 * struct aq_stats_s, or MACsec counters fetched from firmware.
 */
enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};

/* Table entry for one MSM xstat: name + offset into struct aq_stats_s. */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

/* Table entry for one MACsec xstat: name + offset into
 * struct macsec_stats.
 */
#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

/* One row of the xstats lookup table used by atl_dev_xstats_get*(). */
struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};
216 
/* Extended statistics exposed by the PMD. MSM counters come first,
 * followed by the MACsec counter groups; the order here defines the
 * xstat ids reported to applications.
 */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};
265 
/* ethdev callback table wired to the device in eth_atl_dev_init().
 * Note xstats_reset deliberately reuses atl_dev_stats_reset.
 */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
345 
/* Thin wrapper over the B0 hardware reset; returns the HAL status code. */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
351 
/* Unmask all interrupt sources (lower mask register set to all-ones). */
static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}
359 
/* Mask all interrupt sources (lower mask register cleared). */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
366 
367 static int
368 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
369 {
370 	struct atl_adapter *adapter = eth_dev->data->dev_private;
371 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
372 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
373 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
374 	int err = 0;
375 
376 	PMD_INIT_FUNC_TRACE();
377 
378 	eth_dev->dev_ops = &atl_eth_dev_ops;
379 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
380 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
381 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
382 
383 	/* For secondary processes, the primary process has done all the work */
384 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
385 		return 0;
386 
387 	/* Vendor and Device ID need to be set before init of shared code */
388 	hw->device_id = pci_dev->id.device_id;
389 	hw->vendor_id = pci_dev->id.vendor_id;
390 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
391 
392 	/* Hardware configuration - hardcode */
393 	adapter->hw_cfg.is_lro = false;
394 	adapter->hw_cfg.wol = false;
395 	adapter->hw_cfg.is_rss = false;
396 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
397 
398 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
399 			  AQ_NIC_RATE_5G |
400 			  AQ_NIC_RATE_2G5 |
401 			  AQ_NIC_RATE_1G |
402 			  AQ_NIC_RATE_100M;
403 
404 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
405 	adapter->hw_cfg.aq_rss.indirection_table_size =
406 		HW_ATL_B0_RSS_REDIRECTION_MAX;
407 
408 	hw->aq_nic_cfg = &adapter->hw_cfg;
409 
410 	/* disable interrupt */
411 	atl_disable_intr(hw);
412 
413 	/* Allocate memory for storing MAC addresses */
414 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
415 					RTE_ETHER_ADDR_LEN, 0);
416 	if (eth_dev->data->mac_addrs == NULL) {
417 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
418 		return -ENOMEM;
419 	}
420 
421 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
422 	if (err)
423 		return err;
424 
425 	/* Copy the permanent MAC address */
426 	if (hw->aq_fw_ops->get_mac_permanent(hw,
427 			eth_dev->data->mac_addrs->addr_bytes) != 0)
428 		return -EINVAL;
429 
430 	/* Reset the hw statistics */
431 	atl_dev_stats_reset(eth_dev);
432 
433 	rte_intr_callback_register(intr_handle,
434 				   atl_dev_interrupt_handler, eth_dev);
435 
436 	/* enable uio/vfio intr/eventfd mapping */
437 	rte_intr_enable(intr_handle);
438 
439 	/* enable support intr */
440 	atl_enable_intr(eth_dev);
441 
442 	return err;
443 }
444 
/*
 * Per-device teardown (primary process only): close the device if still
 * running, detach the burst/ops pointers, quiesce and unregister the
 * interrupt, and free the MAC address array.
 * Returns 0 on success, -EPERM from a secondary process.
 */
static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* atl_dev_close() performs the stop + queue teardown. */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
476 
/* PCI probe hook: allocate an ethdev with an atl_adapter private area
 * and run eth_atl_dev_init() on it.
 */
static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}
484 
/* PCI remove hook: tear the ethdev down via eth_atl_dev_uninit(). */
static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}
490 
/* dev_configure callback: no hardware work here, just request a link
 * status refresh once the device starts.
 */
static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}
504 
505 /*
506  * Configure device link speed and setup link.
507  * It returns 0 on success.
508  */
509 static int
510 atl_dev_start(struct rte_eth_dev *dev)
511 {
512 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
513 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
514 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
515 	uint32_t intr_vector = 0;
516 	int status;
517 	int err;
518 
519 	PMD_INIT_FUNC_TRACE();
520 
521 	/* set adapter started */
522 	hw->adapter_stopped = 0;
523 
524 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
525 		PMD_INIT_LOG(ERR,
526 		"Invalid link_speeds for port %u, fix speed not supported",
527 				dev->data->port_id);
528 		return -EINVAL;
529 	}
530 
531 	/* disable uio/vfio intr/eventfd mapping */
532 	rte_intr_disable(intr_handle);
533 
534 	/* reinitialize adapter
535 	 * this calls reset and start
536 	 */
537 	status = atl_reset_hw(hw);
538 	if (status != 0)
539 		return -EIO;
540 
541 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
542 
543 	hw_atl_b0_hw_start(hw);
544 	/* check and configure queue intr-vector mapping */
545 	if ((rte_intr_cap_multiple(intr_handle) ||
546 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
547 	    dev->data->dev_conf.intr_conf.rxq != 0) {
548 		intr_vector = dev->data->nb_rx_queues;
549 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
550 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
551 					ATL_MAX_INTR_QUEUE_NUM);
552 			return -ENOTSUP;
553 		}
554 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
555 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
556 			return -1;
557 		}
558 	}
559 
560 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
561 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
562 				    dev->data->nb_rx_queues * sizeof(int), 0);
563 		if (intr_handle->intr_vec == NULL) {
564 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
565 				     " intr_vec", dev->data->nb_rx_queues);
566 			return -ENOMEM;
567 		}
568 	}
569 
570 	/* initialize transmission unit */
571 	atl_tx_init(dev);
572 
573 	/* This can fail when allocating mbufs for descriptor rings */
574 	err = atl_rx_init(dev);
575 	if (err) {
576 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
577 		goto error;
578 	}
579 
580 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
581 		hw->fw_ver_actual >> 24,
582 		(hw->fw_ver_actual >> 16) & 0xFF,
583 		hw->fw_ver_actual & 0xFFFF);
584 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
585 
586 	err = atl_start_queues(dev);
587 	if (err < 0) {
588 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
589 		goto error;
590 	}
591 
592 	err = atl_dev_set_link_up(dev);
593 
594 	err = hw->aq_fw_ops->update_link_status(hw);
595 
596 	if (err)
597 		goto error;
598 
599 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
600 
601 	if (rte_intr_allow_others(intr_handle)) {
602 		/* check if lsc interrupt is enabled */
603 		if (dev->data->dev_conf.intr_conf.lsc != 0)
604 			atl_dev_lsc_interrupt_setup(dev, true);
605 		else
606 			atl_dev_lsc_interrupt_setup(dev, false);
607 	} else {
608 		rte_intr_callback_unregister(intr_handle,
609 					     atl_dev_interrupt_handler, dev);
610 		if (dev->data->dev_conf.intr_conf.lsc != 0)
611 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
612 				     " no intr multiplex");
613 	}
614 
615 	/* check if rxq interrupt is enabled */
616 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
617 	    rte_intr_dp_is_en(intr_handle))
618 		atl_dev_rxq_interrupt_setup(dev);
619 
620 	/* enable uio/vfio intr/eventfd mapping */
621 	rte_intr_enable(intr_handle);
622 
623 	/* resume enabled intr since hw reset */
624 	atl_enable_intr(dev);
625 
626 	return 0;
627 
628 error:
629 	atl_stop_queues(dev);
630 	return -EIO;
631 }
632 
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 * Order matters: interrupts are masked and the NIC reset before the
 * queues are stopped, then cached state (scattered_rx/lro/link) is
 * cleared and the datapath interrupt resources released.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	/* atl_dev_start() unregisters the handler when intr multiplexing
	 * is unavailable; restore it here so LSC works after a restart.
	 */
	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
677 
678 /*
679  * Set device link up: enable tx.
680  */
681 static int
682 atl_dev_set_link_up(struct rte_eth_dev *dev)
683 {
684 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
685 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
686 	uint32_t speed_mask = 0;
687 
688 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
689 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
690 	} else {
691 		if (link_speeds & ETH_LINK_SPEED_10G)
692 			speed_mask |= AQ_NIC_RATE_10G;
693 		if (link_speeds & ETH_LINK_SPEED_5G)
694 			speed_mask |= AQ_NIC_RATE_5G;
695 		if (link_speeds & ETH_LINK_SPEED_1G)
696 			speed_mask |= AQ_NIC_RATE_1G;
697 		if (link_speeds & ETH_LINK_SPEED_2_5G)
698 			speed_mask |=  AQ_NIC_RATE_2G5;
699 		if (link_speeds & ETH_LINK_SPEED_100M)
700 			speed_mask |= AQ_NIC_RATE_100M;
701 	}
702 
703 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
704 }
705 
/*
 * Set device link down: an empty rate mask tells the firmware to drop
 * the link.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}
716 
/*
 * Reset and stop device, then release all Rx/Tx queue resources.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}
729 
/* dev_reset callback: full uninit followed by re-init of the port. */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int err;

	err = eth_atl_dev_uninit(dev);
	if (err != 0)
		return err;

	return eth_atl_dev_init(dev);
}
743 
/*
 * Push the cached MACsec configuration to the firmware as a sequence of
 * request messages: global config, TX SC, RX SC, TX SA, RX SA.
 * Returns 0 on success (or when MACsec is disabled/unsupported),
 * -1 if the firmware rejects any step.
 */
static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	/* Nothing to do if MACsec is off or the FW lacks the mailbox. */
	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Creating set of sc/sa structures from parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* TXSC always one (??) */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index which currently used */

	/*
	 * Creating SCI (Secure Channel Identifier).
	 * SCI constructed from Source MAC and Port identifier
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0; /* 0x3f would match on DA; disabled */

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA (key words are byte-swapped and reversed) */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}
866 
867 int atl_macsec_enable(struct rte_eth_dev *dev,
868 		      uint8_t encr, uint8_t repl_prot)
869 {
870 	struct aq_hw_cfg_s *cfg =
871 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
872 
873 	cfg->aq_macsec.common.macsec_enabled = 1;
874 	cfg->aq_macsec.common.encryption_enabled = encr;
875 	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
876 
877 	return 0;
878 }
879 
880 int atl_macsec_disable(struct rte_eth_dev *dev)
881 {
882 	struct aq_hw_cfg_s *cfg =
883 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
884 
885 	cfg->aq_macsec.common.macsec_enabled = 0;
886 
887 	return 0;
888 }
889 
/* Cache the TX secure-channel MAC address. The 6-byte MAC is stored in
 * the upper bytes of the field with a 2-byte offset — presumably an
 * 8-byte FW layout; confirm against struct aq_macsec_config.
 */
int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);

	return 0;
}
901 
/* Cache the RX secure-channel MAC address and port identifier (pi).
 * Same 2-byte MAC offset convention as atl_macsec_config_txsc().
 */
int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}
915 
/* Cache the TX secure-association parameters: SA index, association
 * number, next packet number and the 16-byte (128-bit) key.
 */
int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}
930 
/* Cache the RX secure-association parameters; mirror of
 * atl_macsec_select_txsa().
 */
int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}
945 
/*
 * stats_get callback: refresh firmware counters, then fill the basic
 * stats from the DMA counters and the per-queue software counters kept
 * by the Rx/Tx paths. Always returns 0.
 */
static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	/* Pull fresh counters from FW into hw->curr_stats. */
	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	/* Per-queue counters are tracked in software by the datapath. */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}
977 
/*
 * stats_reset/xstats_reset callback: sync counters from firmware once,
 * then zero both the cached hardware snapshot and the software
 * per-queue totals. Always returns 0.
 */
static int
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));

	return 0;
}
993 
994 static int
995 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
996 			 struct rte_eth_xstat_name *xstats_names,
997 			 unsigned int size)
998 {
999 	unsigned int i;
1000 
1001 	if (!xstats_names)
1002 		return RTE_DIM(atl_xstats_tbl);
1003 
1004 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
1005 		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
1006 			RTE_ETH_XSTATS_NAME_SIZE);
1007 
1008 	return i;
1009 }
1010 
/* ethdev .xstats_get callback.
 * MSM-type entries are read from the cached hw->curr_stats snapshot;
 * MACSEC-type entries come from a firmware query issued here (when the
 * firmware interface supports it). Returns the number of entries filled.
 * If the MACsec query failed (or is unsupported), the loop stops at the
 * first MACSEC entry, so only the leading MSM entries are reported.
 */
static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;

	if (!stats)
		return 0;

	/* Ask firmware for aggregate MACsec statistics (0xff indices
	 * request totals rather than a single SA/SC).
	 */
	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
		stats[i].id = i;

		/* Each table entry records the byte offset of its counter
		 * within the corresponding statistics structure.
		 */
		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (err)
				goto done;
			stats[i].value = *(u64 *)((uint8_t *)&resp.stats +
					 atl_xstats_tbl[i].offset);
			break;
		}
	}
done:
	return i;
}
1056 
1057 static int
1058 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1059 {
1060 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1061 	uint32_t fw_ver = 0;
1062 	unsigned int ret = 0;
1063 
1064 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1065 	if (ret)
1066 		return -EIO;
1067 
1068 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1069 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1070 
1071 	ret += 1; /* add string null-terminator */
1072 
1073 	if (fw_size < ret)
1074 		return ret;
1075 
1076 	return 0;
1077 }
1078 
1079 static int
1080 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1081 {
1082 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1083 
1084 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1085 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1086 
1087 	dev_info->min_rx_bufsize = 1024;
1088 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1089 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1090 	dev_info->max_vfs = pci_dev->max_vfs;
1091 
1092 	dev_info->max_hash_mac_addrs = 0;
1093 	dev_info->max_vmdq_pools = 0;
1094 	dev_info->vmdq_queue_num = 0;
1095 
1096 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1097 
1098 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1099 
1100 
1101 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1102 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1103 	};
1104 
1105 	dev_info->default_txconf = (struct rte_eth_txconf) {
1106 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1107 	};
1108 
1109 	dev_info->rx_desc_lim = rx_desc_lim;
1110 	dev_info->tx_desc_lim = tx_desc_lim;
1111 
1112 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1113 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1114 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1115 
1116 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1117 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1118 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1119 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
1120 
1121 	return 0;
1122 }
1123 
1124 static const uint32_t *
1125 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1126 {
1127 	static const uint32_t ptypes[] = {
1128 		RTE_PTYPE_L2_ETHER,
1129 		RTE_PTYPE_L2_ETHER_ARP,
1130 		RTE_PTYPE_L2_ETHER_VLAN,
1131 		RTE_PTYPE_L3_IPV4,
1132 		RTE_PTYPE_L3_IPV6,
1133 		RTE_PTYPE_L4_TCP,
1134 		RTE_PTYPE_L4_UDP,
1135 		RTE_PTYPE_L4_SCTP,
1136 		RTE_PTYPE_L4_ICMP,
1137 		RTE_PTYPE_UNKNOWN
1138 	};
1139 
1140 	if (dev->rx_pkt_burst == atl_recv_pkts)
1141 		return ptypes;
1142 
1143 	return NULL;
1144 }
1145 
/* EAL alarm callback scheduled from atl_dev_link_update(): applies the
 * cached MACsec configuration outside the link-update path.
 */
static void
atl_dev_delayed_handler(void *param)
{
	atl_dev_configure_macsec((struct rte_eth_dev *)param);
}
1153 
1154 
/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	/* Prepare a default "link down" record to publish if needed. */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	/* NOTE(review): a firmware read failure returns 0 ("changed")
	 * without touching the cached status — confirm this is intended.
	 */
	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	/* Defer MACsec reconfiguration by 1 second so it runs from the
	 * alarm context rather than this update path.
	 */
	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}
1211 
1212 static int
1213 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1214 {
1215 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1216 
1217 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1218 
1219 	return 0;
1220 }
1221 
1222 static int
1223 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1224 {
1225 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1226 
1227 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1228 
1229 	return 0;
1230 }
1231 
1232 static int
1233 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1234 {
1235 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1236 
1237 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1238 
1239 	return 0;
1240 }
1241 
1242 static int
1243 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1244 {
1245 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1246 
1247 	if (dev->data->promiscuous == 1)
1248 		return 0; /* must remain in all_multicast mode */
1249 
1250 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1251 
1252 	return 0;
1253 }
1254 
1255 /**
1256  * It clears the interrupt causes and enables the interrupt.
1257  * It will be called once only during nic initialized.
1258  *
1259  * @param dev
1260  *  Pointer to struct rte_eth_dev.
1261  * @param on
1262  *  Enable or Disable.
1263  *
1264  * @return
1265  *  - On success, zero.
1266  *  - On failure, a negative value.
1267  */
1268 
static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	/* No hardware programming required; just report the current state. */
	atl_dev_link_status_print(dev);
	return 0;
}
1275 
/* Rx-queue interrupt setup hook: no device-specific work is needed,
 * so this stub always succeeds.
 */
static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
1281 
1282 
1283 static int
1284 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1285 {
1286 	struct atl_interrupt *intr =
1287 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1288 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1289 	u64 cause = 0;
1290 
1291 	hw_atl_b0_hw_irq_read(hw, &cause);
1292 
1293 	atl_disable_intr(hw);
1294 
1295 	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
1296 		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
1297 
1298 	return 0;
1299 }
1300 
1301 /**
1302  * It gets and then prints the link status.
1303  *
1304  * @param dev
1305  *  Pointer to struct rte_eth_dev.
1306  *
 * @return
 *  void
1310  */
1311 static void
1312 atl_dev_link_status_print(struct rte_eth_dev *dev)
1313 {
1314 	struct rte_eth_link link;
1315 
1316 	memset(&link, 0, sizeof(link));
1317 	rte_eth_linkstatus_get(dev, &link);
1318 	if (link.link_status) {
1319 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1320 					(int)(dev->data->port_id),
1321 					(unsigned int)link.link_speed,
1322 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1323 					"full-duplex" : "half-duplex");
1324 	} else {
1325 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
1326 				(int)(dev->data->port_id));
1327 	}
1328 
1329 
1330 #ifdef DEBUG
1331 {
1332 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1333 
1334 	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1335 				pci_dev->addr.domain,
1336 				pci_dev->addr.bus,
1337 				pci_dev->addr.devid,
1338 				pci_dev->addr.function);
1339 }
1340 #endif
1341 
1342 	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1343 }
1344 
1345 /*
1346  * It executes link_update after knowing an interrupt occurred.
1347  *
1348  * @param dev
1349  *  Pointer to struct rte_eth_dev.
1350  *
1351  * @return
1352  *  - On success, zero.
1353  *  - On failure, a negative value.
1354  */
/* Service interrupt causes latched by atl_dev_interrupt_get_status().
 * On a link change, refresh the cached link status and notify the
 * application (RTE_ETH_EVENT_INTR_LSC); if the link did not change,
 * poll firmware for expired MACsec keys and raise RTE_ETH_EVENT_MACSEC
 * when any expiry/threshold flag is set. Interrupts are re-enabled and
 * the interrupt line acked before returning.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check macsec Keys expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		/* Index 0 targets the first SA/SC entries. */
		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req fail");
			goto done;
		}
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			_rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	/* Unmask device interrupts and ack the host interrupt line. */
	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);

	return 0;
}
1409 
1410 /**
1411  * Interrupt handler triggered by NIC  for handling
1412  * specific interrupt.
1413  *
1414  * @param handle
1415  *  Pointer to interrupt handle.
1416  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
1418  *
1419  * @return
1420  *  void
1421  */
1422 static void
1423 atl_dev_interrupt_handler(void *param)
1424 {
1425 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1426 
1427 	atl_dev_interrupt_get_status(dev);
1428 	atl_dev_interrupt_action(dev, dev->intr_handle);
1429 }
1430 
1431 
/* ethdev .get_eeprom_length callback: the module EEPROM exposed by this
 * device has a fixed size.
 */
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}
1437 
1438 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1439 		       struct rte_dev_eeprom_info *eeprom)
1440 {
1441 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1442 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1443 
1444 	if (hw->aq_fw_ops->get_eeprom == NULL)
1445 		return -ENOTSUP;
1446 
1447 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1448 	    eeprom->data == NULL)
1449 		return -EINVAL;
1450 
1451 	if (eeprom->magic > 0x7F)
1452 		return -EINVAL;
1453 
1454 	if (eeprom->magic)
1455 		dev_addr = eeprom->magic;
1456 
1457 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1458 					 eeprom->length, eeprom->offset);
1459 }
1460 
1461 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1462 		       struct rte_dev_eeprom_info *eeprom)
1463 {
1464 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1465 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1466 
1467 	if (hw->aq_fw_ops->set_eeprom == NULL)
1468 		return -ENOTSUP;
1469 
1470 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1471 	    eeprom->data == NULL)
1472 		return -EINVAL;
1473 
1474 	if (eeprom->magic > 0x7F)
1475 		return -EINVAL;
1476 
1477 	if (eeprom->magic)
1478 		dev_addr = eeprom->magic;
1479 
1480 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1481 					 eeprom->length, eeprom->offset);
1482 }
1483 
1484 static int
1485 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1486 {
1487 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1488 	u32 mif_id;
1489 	int err;
1490 
1491 	if (regs->data == NULL) {
1492 		regs->length = hw_atl_utils_hw_get_reg_length();
1493 		regs->width = sizeof(u32);
1494 		return 0;
1495 	}
1496 
1497 	/* Only full register dump is supported */
1498 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1499 		return -ENOTSUP;
1500 
1501 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1502 
1503 	/* Device version */
1504 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1505 	regs->version = mif_id & 0xFFU;
1506 
1507 	return err;
1508 }
1509 
1510 static int
1511 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1512 {
1513 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1514 	u32 fc = AQ_NIC_FC_OFF;
1515 
1516 	if (hw->aq_fw_ops->get_flow_control == NULL)
1517 		return -ENOTSUP;
1518 
1519 	hw->aq_fw_ops->get_flow_control(hw, &fc);
1520 
1521 	if (fc == AQ_NIC_FC_OFF)
1522 		fc_conf->mode = RTE_FC_NONE;
1523 	else if (fc & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1524 		fc_conf->mode = RTE_FC_FULL;
1525 	else if (fc & AQ_NIC_FC_RX)
1526 		fc_conf->mode = RTE_FC_RX_PAUSE;
1527 	else if (fc & AQ_NIC_FC_RX)
1528 		fc_conf->mode = RTE_FC_TX_PAUSE;
1529 
1530 	return 0;
1531 }
1532 
1533 static int
1534 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1535 {
1536 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1537 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1538 
1539 
1540 	if (hw->aq_fw_ops->set_flow_control == NULL)
1541 		return -ENOTSUP;
1542 
1543 	if (fc_conf->mode == RTE_FC_NONE)
1544 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1545 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1546 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1547 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1548 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1549 	else if (fc_conf->mode == RTE_FC_FULL)
1550 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1551 
1552 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1553 		return hw->aq_fw_ops->set_flow_control(hw);
1554 
1555 	return 0;
1556 }
1557 
1558 static int
1559 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1560 		    u8 *mac_addr, bool enable)
1561 {
1562 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1563 	unsigned int h = 0U;
1564 	unsigned int l = 0U;
1565 	int err;
1566 
1567 	if (mac_addr) {
1568 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1569 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1570 			(mac_addr[4] << 8) | mac_addr[5];
1571 	}
1572 
1573 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1574 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1575 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1576 
1577 	if (enable)
1578 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1579 
1580 	err = aq_hw_err_from_flags(hw);
1581 
1582 	return err;
1583 }
1584 
1585 static int
1586 atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1587 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1588 {
1589 	if (rte_is_zero_ether_addr(mac_addr)) {
1590 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1591 		return -EINVAL;
1592 	}
1593 
1594 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1595 }
1596 
/* ethdev .mac_addr_remove callback: clear and disable one filter entry. */
static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}
1602 
/* ethdev .mac_addr_set callback: replace the default MAC in filter
 * slot 0.
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	/* Propagate the programming status instead of always returning 0
	 * as the original did (an invalid/failed address went unnoticed).
	 */
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1610 
1611 static int
1612 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1613 {
1614 	struct rte_eth_dev_info dev_info;
1615 	int ret;
1616 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1617 
1618 	ret = atl_dev_info_get(dev, &dev_info);
1619 	if (ret != 0)
1620 		return ret;
1621 
1622 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1623 		return -EINVAL;
1624 
1625 	/* update max frame size */
1626 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1627 
1628 	return 0;
1629 }
1630 
/* ethdev .vlan_filter_set callback.
 * on != 0 adds vlan_id to a free hardware VLAN filter slot; on == 0
 * removes it. The cached cfg->vlan_filter[] table mirrors the hardware
 * slots (0 marks a free slot). VLAN promiscuous mode is enabled
 * whenever no filter entries remain configured.
 */
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Phase 1: look for an existing entry with this VLAN ID. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exist, or already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to found free VLAN filter to add new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID*/
		err = -ENOMEM;
		goto exit;
	}

	/* Phase 2: program the new entry into the free slot found above. */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter empty  */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
1691 
1692 static int
1693 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1694 {
1695 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1696 	struct aq_hw_cfg_s *cfg =
1697 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1698 	int i;
1699 
1700 	PMD_INIT_FUNC_TRACE();
1701 
1702 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1703 		if (cfg->vlan_filter[i])
1704 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1705 	}
1706 	return 0;
1707 }
1708 
1709 static int
1710 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1711 {
1712 	struct aq_hw_cfg_s *cfg =
1713 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1714 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1715 	int ret = 0;
1716 	int i;
1717 
1718 	PMD_INIT_FUNC_TRACE();
1719 
1720 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1721 
1722 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1723 
1724 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1725 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1726 
1727 	if (mask & ETH_VLAN_EXTEND_MASK)
1728 		ret = -ENOTSUP;
1729 
1730 	return ret;
1731 }
1732 
1733 static int
1734 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1735 		  uint16_t tpid)
1736 {
1737 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1738 	int err = 0;
1739 
1740 	PMD_INIT_FUNC_TRACE();
1741 
1742 	switch (vlan_type) {
1743 	case ETH_VLAN_TYPE_INNER:
1744 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1745 		break;
1746 	case ETH_VLAN_TYPE_OUTER:
1747 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1748 		break;
1749 	default:
1750 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1751 		err = -ENOTSUP;
1752 	}
1753 
1754 	return err;
1755 }
1756 
1757 static void
1758 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1759 {
1760 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1761 
1762 	PMD_INIT_FUNC_TRACE();
1763 
1764 	if (queue_id > dev->data->nb_rx_queues) {
1765 		PMD_DRV_LOG(ERR, "Invalid queue id");
1766 		return;
1767 	}
1768 
1769 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1770 }
1771 
1772 static int
1773 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1774 			  struct rte_ether_addr *mc_addr_set,
1775 			  uint32_t nb_mc_addr)
1776 {
1777 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1778 	u32 i;
1779 
1780 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1781 		return -EINVAL;
1782 
1783 	/* Update whole uc filters table */
1784 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1785 		u8 *mac_addr = NULL;
1786 		u32 l = 0, h = 0;
1787 
1788 		if (i < nb_mc_addr) {
1789 			mac_addr = mc_addr_set[i].addr_bytes;
1790 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1791 				(mac_addr[4] << 8) | mac_addr[5];
1792 			h = (mac_addr[0] << 8) | mac_addr[1];
1793 		}
1794 
1795 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1796 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1797 							HW_ATL_B0_MAC_MIN + i);
1798 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1799 							HW_ATL_B0_MAC_MIN + i);
1800 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1801 					   HW_ATL_B0_MAC_MIN + i);
1802 	}
1803 
1804 	return 0;
1805 }
1806 
/* ethdev .reta_update callback: copy the requested redirection entries
 * into the cached RSS config (clamped to the last Rx queue) and program
 * the hardware table.
 * NOTE(review): reta_conf is an array of rte_eth_rss_reta_entry64, so
 * indices >= RTE_RETA_GROUP_SIZE should address reta_conf[i / 64];
 * indexing only reta_conf->reta[] is safe only while the HW table size
 * does not exceed 64 — confirm. The per-entry mask is also ignored, so
 * every entry up to reta_size is overwritten.
 */
static int
atl_reta_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}
1823 
/* ethdev .reta_query callback: return the cached RSS indirection table.
 * NOTE(review): like atl_reta_update(), only the first
 * rte_eth_rss_reta_entry64 group is filled and its mask is set
 * unconditionally; valid only while the table size is <= 64 — confirm.
 */
static int
atl_reta_query(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}
1837 
1838 static int
1839 atl_rss_hash_update(struct rte_eth_dev *dev,
1840 				 struct rte_eth_rss_conf *rss_conf)
1841 {
1842 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1843 	struct aq_hw_cfg_s *cfg =
1844 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1845 	static u8 def_rss_key[40] = {
1846 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1847 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1848 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1849 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1850 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1851 	};
1852 
1853 	cfg->is_rss = !!rss_conf->rss_hf;
1854 	if (rss_conf->rss_key) {
1855 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1856 		       rss_conf->rss_key_len);
1857 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1858 	} else {
1859 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1860 		       sizeof(def_rss_key));
1861 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1862 	}
1863 
1864 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1865 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1866 	return 0;
1867 }
1868 
1869 static int
1870 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1871 				 struct rte_eth_rss_conf *rss_conf)
1872 {
1873 	struct aq_hw_cfg_s *cfg =
1874 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1875 
1876 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1877 	if (rss_conf->rss_key) {
1878 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1879 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1880 		       rss_conf->rss_key_len);
1881 	}
1882 
1883 	return 0;
1884 }
1885 
1886 static bool
1887 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1888 {
1889 	if (strcmp(dev->device->driver->name, drv->driver.name))
1890 		return false;
1891 
1892 	return true;
1893 }
1894 
/* True when the ethdev is bound to this (atlantic) PMD. */
bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}
1900 
/* Register the PMD with the PCI bus, export its PCI ID table, and
 * declare the kernel modules it can bind against.
 */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1904 
/* Constructor: register the PMD's log types, defaulting both to NOTICE. */
RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}
1914