/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>
#include <rte_alarm.h>

#include "atl_ethdev.h"
#include "atl_common.h"
#include "atl_hw_regs.h"
#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);

static int  atl_dev_configure(struct rte_eth_dev *dev);
static int  atl_dev_start(struct rte_eth_dev *dev);
static void atl_dev_stop(struct rte_eth_dev *dev);
static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
static void atl_dev_close(struct rte_eth_dev *dev);
static int  atl_dev_reset(struct rte_eth_dev *dev);
static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);

static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);

static void atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);

static void atl_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN stuff */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
				     uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);

/* Regs */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);


static int atl_add_mac_addr(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
					   struct rte_ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);


static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
int atl_logtype_init;
int atl_logtype_driver;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};

#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

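/*
 * SFP/SFP+ module EEPROMs expose a 256-byte page (the SFF-8472 lower
 * memory map), which is why a fixed length is reported for them below.
 */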
#define SFP_EEPROM_SIZE 0x100

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};

enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};

static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};

static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};

static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}

static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}

static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}

static int
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
{
	struct atl_adapter *adapter = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;
	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration - hardcoded defaults */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M;

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	/* disable interrupts */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
					RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
		return -ENOMEM;
	}

	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err)
		return err;

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)
		return -EINVAL;

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable supported interrupts */
	atl_enable_intr(eth_dev);

	return err;
}

static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}

static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}

static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
atl_dev_start(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int status;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

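	/*
	 * The firmware autonegotiates from a mask of allowed rates
	 * (see atl_dev_set_link_up()); forcing a single fixed speed
	 * is not supported.
	 */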
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fixed speed not supported",
				dev->data->port_id);
		return -EINVAL;
	}

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = atl_reset_hw(hw);
	if (status != 0)
		return -EIO;

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
	if (err != 0)
		return -EIO;

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					ATL_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
			return -1;
		}
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* initialize transmission unit */
	atl_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		hw->fw_ver_actual >> 24,
		(hw->fw_ver_actual >> 16) & 0xFF,
		hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = atl_dev_set_link_up(dev);
	if (err)
		goto error;

	err = hw->aq_fw_ops->update_link_status(hw);
	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
		else
			atl_dev_lsc_interrupt_setup(dev, false);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "LSC interrupt cannot be enabled:"
				     " no interrupt multiplexing");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);

	return 0;

error:
	atl_stop_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

/*
 * Set device link up: enable tx.
 */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;
	} else {
		if (link_speeds & ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
		if (link_speeds & ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
		if (link_speeds & ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
		if (link_speeds & ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
		if (link_speeds & ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;
	}

	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}

/*
 * Set device link down: disable tx.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}

/*
 * Reset and stop device.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}

static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_atl_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}
static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Create the SC/SA structures from the parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* only a single TX SC is used */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */

	/*
	 * Create the SCI (Secure Channel Identifier): per IEEE 802.1AE
	 * it is the source MAC address followed by the port identifier.
	 */
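	/*
	 * Illustrative example (hypothetical values): source MAC
	 * 00:17:b6:aa:bb:cc with port identifier 1 yields the SCI
	 * 00:17:b6:aa:bb:cc:00:01.
	 */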
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0;

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}

int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

	return 0;
}

int atl_macsec_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

	return 0;
}

int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

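	/*
	 * The stored MAC field is 8 bytes wide; the 6-byte address is
	 * right-aligned within it, hence the "+ 2" destination offset
	 * (the RX SC below uses the same layout).
	 */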
	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);

	return 0;
}

int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}

int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}

int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}

static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}

static void
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
}

static int
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned int size)
{
	unsigned int i;

	if (!xstats_names)
		return RTE_DIM(atl_xstats_tbl);

	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
			RTE_ETH_XSTATS_NAME_SIZE);

	return i;
}

static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;

	if (!stats)
		return 0;

	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (err)
				goto done;
			stats[i].value = *(u64 *)((uint8_t *)&resp.stats +
					 atl_xstats_tbl[i].offset);
			break;
		}
	}
done:
	return i;
}

static int
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
	unsigned int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	if (ret)
		return -EIO;

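	/*
	 * Per the ethdev fw_version_get contract, return 0 on success,
	 * or the required buffer size (including the terminating NUL)
	 * when the supplied fw_size is too small.
	 */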
	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);

	ret += 1; /* include the terminating NUL */

	if (fw_size < ret)
		return ret;

	return 0;
}

static void
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
}

static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == atl_recv_pkts)
		return ptypes;

	return NULL;
}

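/*
 * MACsec settings are reapplied after a link status change; the work
 * is deferred to an EAL alarm callback (scheduled from
 * atl_dev_link_update() below), which keeps the firmware mailbox
 * traffic out of the interrupt handler itself.
 */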
static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);
}

/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}

static void
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
}

static void
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
}

static void
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
}

static void
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once, during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */

static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}

static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, "Port %d: Link Down",
				(int)(dev->data->port_id));
	}

#ifdef DEBUG
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
#endif

	PMD_DRV_LOG(INFO, "Link speed: %d", link.link_speed);
}

/*
 * It executes link_update after an interrupt has occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify the user application if the link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check whether the MACsec keys have expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req failed");
			goto done;
		}
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			_rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);

	return 0;
}

/**
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered
 *  before.
 *
 * @return
 *  void
 */
static void
atl_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}

static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}

int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->get_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

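	/*
	 * A non-zero magic selects an alternate SMBus device address;
	 * addresses are 7-bit, hence the 0x7F bound checked above.
	 * The same convention applies in atl_dev_set_eeprom() below.
	 */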
	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->set_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}

static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fc = AQ_NIC_FC_OFF;

	if (hw->aq_fw_ops->get_flow_control == NULL)
		return -ENOTSUP;

	hw->aq_fw_ops->get_flow_control(hw, &fc);

	if (fc == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_FC_NONE;
	else if ((fc & (AQ_NIC_FC_RX | AQ_NIC_FC_TX)) ==
		 (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
		fc_conf->mode = RTE_FC_FULL;
	else if (fc & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (fc & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_FC_TX_PAUSE;

	return 0;
}

static int
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	if (hw->aq_fw_ops->set_flow_control == NULL)
		return -ENOTSUP;

	if (fc_conf->mode == RTE_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);

	return 0;
}

static int
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err;

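	/*
	 * Split the 48-bit MAC across the filter registers: the upper
	 * 16 bits go to the MSW register, the lower 32 bits to the LSW
	 * register.
	 */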
	if (mac_addr) {
		h = (mac_addr[0] << 8) | (mac_addr[1]);
		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			(mac_addr[4] << 8) | mac_addr[5];
	}

	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	if (enable)
		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

	return err;
}

static int
atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
			uint32_t index, uint32_t pool __rte_unused)
{
	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
}

static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}

static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}

static int
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	atl_dev_info_get(dev, &dev_info);

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}

static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found, so there is nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exists, or was just removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter slot for the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter slot left for the new VLAN_ID */
		err = -ENOMEM;
		goto exit;
	}

	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promiscuous mode if the filter table is empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}

static int
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
	}
	return 0;
}

static int
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	int i;

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	if (mask & ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
}

static int
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
		break;
	case ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
		err = -ENOTSUP;
	}

	return err;
}

static void
atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue id");
		return;
	}

	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
}

static int
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 i;

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
		return -EINVAL;

	/* Multicast entries reuse the tail of the unicast filter table,
	 * starting at index HW_ATL_B0_MAC_MIN; rewrite that whole region.
	 */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;
		u32 l = 0, h = 0;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];
		}

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
	}

	return 0;
}

static int
atl_reta_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}

static int
atl_reta_query(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}

static int
atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
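	/*
	 * Built-in default key, used when the application does not
	 * supply one; its 40-byte length should match the hash_key_size
	 * (HW_ATL_B0_RSS_HASHKEY_BITS / 8) reported in dev_info.
	 */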
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		/* Guard the fixed-size key buffer against oversized input */
		if (rss_conf->rss_key_len >
		    sizeof(cfg->aq_rss.hash_secret_key))
			return -EINVAL;
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
	} else {
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
	}

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
	return 0;
}

static int
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
	}

	return 0;
}

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}

RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");

RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}