/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_string_fns.h>
#include <ethdev_pci.h>
#include <rte_alarm.h>

#include "atl_ethdev.h"
#include "atl_common.h"
#include "atl_hw_regs.h"
#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int atl_dev_configure(struct rte_eth_dev *dev);
static int atl_dev_start(struct rte_eth_dev *dev);
static int atl_dev_stop(struct rte_eth_dev *dev);
static int atl_dev_set_link_up(struct rte_eth_dev *dev);
static int atl_dev_set_link_down(struct rte_eth_dev *dev);
static int atl_dev_close(struct rte_eth_dev *dev);
static int atl_dev_reset(struct rte_eth_dev *dev);
static int atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);

static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);

static int atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN stuff */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
				     uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);

/* Regs */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);

static int atl_add_mac_addr(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);

static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

static int atl_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};

#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
			| RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
			| RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
			| RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
			| RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
			| RTE_ETH_RX_OFFLOAD_VLAN_FILTER)

#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
			| RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
			| RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
			| RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
			| RTE_ETH_TX_OFFLOAD_TCP_TSO \
			| RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
			| RTE_ETH_TX_OFFLOAD_MULTI_SEGS)

#define SFP_EEPROM_SIZE 0x100

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};

enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};

#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};

static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};
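
/*
 * Illustrative sketch (not part of the driver): how the offsetof-based
 * table above is consumed. Each entry records a counter name and its byte
 * offset inside the stats struct, so a counter value can be read back
 * generically with pointer arithmetic, exactly as atl_dev_xstats_get()
 * does further down. "stats" here is an assumed, already-filled
 * struct aq_stats_s.
 *
 *	const struct atl_xstats_tbl_s *e = &atl_xstats_tbl[0];
 *	u64 uprc = *(u64 *)((uint8_t *)&stats + e->offset);
 */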

static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};

static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}

static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}

static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}

static int
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
{
	struct atl_adapter *adapter = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;

	eth_dev->rx_queue_count       = atl_rx_queue_count;
	eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;

	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration: hardcoded defaults */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M;

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	pthread_mutex_init(&hw->mbox_mutex, NULL);

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
					RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
		return -ENOMEM;
	}

	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err)
		return err;

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)
		return -EINVAL;

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	atl_enable_intr(eth_dev);

	return err;
}

static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}

static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, atl_dev_close);
}

static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
atl_dev_start(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int status;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fix speed not supported",
				dev->data->port_id);
		return -EINVAL;
	}

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = atl_reset_hw(hw);
	if (status != 0)
		return -EIO;

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
	if (err)
		return err;

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					ATL_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
			return -1;
		}
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* initialize transmission unit */
	atl_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		hw->fw_ver_actual >> 24,
		(hw->fw_ver_actual >> 16) & 0xFF,
		hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = atl_dev_set_link_up(dev);
	if (err)
		goto error;

	err = hw->aq_fw_ops->update_link_status(hw);
	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
		else
			atl_dev_lsc_interrupt_setup(dev, false);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc interrupt cannot be enabled:"
				     " no interrupt multiplexing support");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);

	return 0;

error:
	atl_stop_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	return 0;
}

/*
 * Set device link up: enable tx.
 */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;
	} else {
		if (link_speeds & RTE_ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
		if (link_speeds & RTE_ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
		if (link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
		if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
		if (link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;
	}

	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}
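
/*
 * Illustrative sketch (not part of the driver): how an application would
 * request the speed mask handled above. Port id 0 and the single-queue
 * configure call are assumptions for the example.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
 *	rte_eth_dev_configure(0, 1, 1, &conf);
 *	rte_eth_dev_start(0);
 */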

/*
 * Set device link down: disable tx.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}

/*
 * Reset and stop device.
 */
static int
atl_dev_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct aq_hw_s *hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = atl_dev_stop(dev);

	atl_free_queues(dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, dev);

	pthread_mutex_destroy(&hw->mbox_mutex);

	return ret;
}

static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = atl_dev_close(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}

static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Create the SC/SA structures from parameters provided via the DPDK API */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* only one TX SC is supported, at index 0 */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */

	/*
	 * Create the SCI (Secure Channel Identifier).
	 * The SCI is constructed from the source MAC address
	 * and a port identifier.
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0;

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}
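
/*
 * Worked example of the SCI construction above (an illustration, with an
 * assumed source MAC of 00:17:b6:01:02:03 and port identifier 1). After the
 * byte swaps, mac_sa[1] holds 0x00000017 and mac_sa[0] holds 0xb6010203, so
 *
 *	sci_hi_part  = (0x00000017 << 16) | (0xb6010203 >> 16) = 0x0017b601
 *	sci_low_part =  0xb6010203 << 16                       = 0x02030000
 *	sci[1] = 0x0017b601, sci[0] = 0x02030000 | 1 = 0x02030001
 *
 * i.e. the 64-bit SCI reads 00:17:b6:01:02:03 followed by port id 0x0001,
 * the usual IEEE 802.1AE layout (SCI = source MAC || port identifier).
 */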

int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

	return 0;
}

int atl_macsec_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

	return 0;
}

int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);

	return 0;
}

int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}

int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}

int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}

static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}

static int
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));

	return 0;
}

static int
atl_dev_xstats_get_count(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;

	struct aq_hw_s *hw = &adapter->hw;
	unsigned int i, count = 0;

	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
			((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
			continue;

		count++;
	}

	return count;
}

static int
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned int size)
{
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (xstats_names) {
		for (i = 0; i < size && i < count; i++) {
			snprintf(xstats_names[i].name,
				RTE_ETH_XSTATS_NAME_SIZE, "%s",
				atl_xstats_tbl[i].name);
		}
	}

	return count;
}

static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (!stats)
		return count;

	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < count; i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (!err) {
				stats[i].value =
					*(u64 *)((uint8_t *)&resp.stats +
					atl_xstats_tbl[i].offset);
			}
			break;
		}
	}

	return i;
}

static int
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
	int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	if (ret)
		return -EIO;

	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* account for the terminating null byte */
	if (fw_size < (size_t)ret)
		return ret;

	return 0;
}
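
/*
 * Worked example of the version decoding above (the raw value is assumed,
 * for illustration only): with fw_ver = 0x0301001e the fields split as
 *
 *	major = fw_ver >> 24          = 0x03   = 3
 *	minor = (fw_ver >> 16) & 0xFF = 0x01   = 1
 *	build = fw_ver & 0xFFFF       = 0x001e = 30
 *
 * so the reported string is "3.1.30".
 */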

static int
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;

	return 0;
}

static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == atl_recv_pkts)
		return ptypes;

	return NULL;
}

static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);
}

/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}

static int
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);

	return 0;
}

static int
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);

	return 0;
}

static int
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);

	return 0;
}

static int
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once, during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}

static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, "Port %d: Link Down",
				(int)(dev->data->port_id));
	}

#ifdef DEBUG
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
#endif

	PMD_DRV_LOG(INFO, "Link speed: %d", link.link_speed);
}

/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check whether the MACsec keys have expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req fail");
			goto done;
		}
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);

	return 0;
}

/**
 * Interrupt handler triggered by the NIC for a specific interrupt.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
atl_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}

static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}

int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->get_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->set_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}
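
/*
 * Illustrative sketch (not part of the driver): reading the SFP EEPROM
 * exposed above through the generic ethdev API. Port id 0 is an assumption;
 * magic = 0 selects the default SMBUS device address.
 *
 *	uint8_t buf[SFP_EEPROM_SIZE];
 *	struct rte_dev_eeprom_info info = {
 *		.data = buf,
 *		.offset = 0,
 *		.length = sizeof(buf),
 *		.magic = 0,
 *	};
 *	int ret = rte_eth_dev_get_eeprom(0, &info);
 *
 * On success (ret == 0) buf holds the first 256 bytes of the module EEPROM.
 */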

static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}

static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fc = AQ_NIC_FC_OFF;

	if (hw->aq_fw_ops->get_flow_control == NULL)
		return -ENOTSUP;

	hw->aq_fw_ops->get_flow_control(hw, &fc);

	if (fc == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_ETH_FC_NONE;
	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (fc & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else if (fc & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;

	return 0;
}

static int
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	if (hw->aq_fw_ops->set_flow_control == NULL)
		return -ENOTSUP;

	if (fc_conf->mode == RTE_ETH_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_ETH_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);

	return 0;
}

static int
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err;

	if (mac_addr) {
		h = (mac_addr[0] << 8) | (mac_addr[1]);
		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			(mac_addr[4] << 8) | mac_addr[5];
	}

	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	if (enable)
		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

	return err;
}
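
/*
 * Worked example of the packing above (an illustration, with an assumed
 * address of 00:17:b6:aa:bb:cc): the high register gets the first two
 * bytes of the MAC and the low register the remaining four,
 *
 *	h = (0x00 << 8) | 0x17                               = 0x0017
 *	l = (0xb6 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc = 0xb6aabbcc
 */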

static int
atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
			uint32_t index __rte_unused, uint32_t pool __rte_unused)
{
	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
}

static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}

static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}

static int
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	int ret;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	ret = atl_dev_info_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

	return 0;
}

static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found, so there is nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exists, or was just removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter entry for the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter entry is left for the new VLAN_ID */
		err = -ENOMEM;
		goto exit;
	}

	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promiscuous mode if the vlan_filter table is empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
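
/*
 * Illustrative sketch (not part of the driver): adding and removing a VLAN
 * filter through the generic ethdev API, which lands in atl_vlan_filter_set()
 * above. Port id 0 and VLAN id 100 are assumptions for the example.
 *
 *	rte_eth_dev_vlan_filter(0, 100, 1);	- add VLAN 100
 *	rte_eth_dev_vlan_filter(0, 100, 0);	- remove it again
 */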

static int
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
	}
	return 0;
}

static int
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	int i;

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	if (mask & RTE_ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
}

static int
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
		err = -ENOTSUP;
	}

	return err;
}

static void
atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue id");
		return;
	}

	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
}

static int
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 i;

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
		return -EINVAL;

	/* Update whole uc filters table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;
		u32 l = 0, h = 0;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];
		}

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
	}

	return 0;
}

static int
atl_reta_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}

static int
atl_reta_query(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}
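
/*
 * Illustrative sketch (not part of the driver): spreading traffic across
 * two queues by programming the redirection table handled above. Port id 0
 * and a single 64-entry group are assumptions for the example; the device
 * exposes HW_ATL_B0_RSS_REDIRECTION_MAX entries via dev_info->reta_size.
 *
 *	struct rte_eth_rss_reta_entry64 reta[1] = { 0 };
 *	int i;
 *
 *	reta[0].mask = UINT64_MAX;
 *	for (i = 0; i < RTE_ETH_RETA_GROUP_SIZE; i++)
 *		reta[0].reta[i] = i % 2;
 *	rte_eth_dev_rss_reta_update(0, reta, RTE_ETH_RETA_GROUP_SIZE);
 *
 * Entries alternate between queue 0 and queue 1.
 */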

static int
atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
	} else {
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
	}

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
	return 0;
}

static int
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
	}

	return 0;
}

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}

RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
RTE_LOG_REGISTER_SUFFIX(atl_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(atl_logtype_driver, driver, NOTICE);