/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_string_fns.h>
#include <ethdev_pci.h>
#include <rte_alarm.h>

#include "atl_ethdev.h"
#include "atl_common.h"
#include "atl_hw_regs.h"
#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int  atl_dev_configure(struct rte_eth_dev *dev);
static int  atl_dev_start(struct rte_eth_dev *dev);
static int atl_dev_stop(struct rte_eth_dev *dev);
static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
static int  atl_dev_close(struct rte_eth_dev *dev);
static int  atl_dev_reset(struct rte_eth_dev *dev);
static int  atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int  atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);

static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);

static int atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN stuff */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
				     uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);

/* Regs */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);

static int atl_add_mac_addr(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
					   struct rte_ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);

static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

static int atl_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};

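/* RX/TX offload capabilities advertised to applications via atl_dev_info_get() */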
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

#define SFP_EEPROM_SIZE 0x100

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};

enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};

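/*
 * Each xstats table entry maps a counter name to its byte offset within
 * the backing structure (struct aq_stats_s for MSM counters, struct
 * macsec_stats for MACsec counters), so atl_dev_xstats_get() can read
 * any counter with a single offset-based access.
 */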
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};

static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};

static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};

static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}

static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}

static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}

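/*
 * Per-port initialization: hook up dev_ops and the RX/TX burst
 * functions; in the primary process also set the hardware config
 * defaults, attach the FW mailbox ops, read the permanent MAC address
 * and register the interrupt handler.
 */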
static int
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
{
	struct atl_adapter *adapter = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;

	eth_dev->rx_queue_count       = atl_rx_queue_count;
	eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;

	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration - hardcode */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M;

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	pthread_mutex_init(&hw->mbox_mutex, NULL);

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
					RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
		return -ENOMEM;
	}

	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err)
		return err;

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)
		return -EINVAL;

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable NIC interrupts */
	atl_enable_intr(eth_dev);

	return err;
}

static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}

static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, atl_dev_close);
}

static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/*
 * Configure the device link speed and set up the link.
 * Returns 0 on success.
 */
static int
atl_dev_start(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int status;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u: fixed speed is not supported",
				dev->data->port_id);
		return -EINVAL;
	}

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Reinitialize the adapter: reset the HW, then start it again */
	status = atl_reset_hw(hw);
	if (status != 0)
		return -EIO;

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
	if (err != 0)
		return -EIO;

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					ATL_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
			return -1;
		}
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* initialize transmission unit */
	atl_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		hw->fw_ver_actual >> 24,
		(hw->fw_ver_actual >> 16) & 0xFF,
		hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = atl_dev_set_link_up(dev);

	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
		else
			atl_dev_lsc_interrupt_setup(dev, false);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "LSC interrupt cannot be enabled:"
				     " no interrupt multiplexing support");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* re-enable interrupts, since the HW was reset */
	atl_enable_intr(dev);

	return 0;

error:
	atl_stop_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* restore the default interrupt handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	return 0;
}

/*
 * Set device link up: enable tx.
 */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;
	} else {
		if (link_speeds & ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
		if (link_speeds & ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
		if (link_speeds & ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
		if (link_speeds & ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
		if (link_speeds & ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;
	}

	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}

/*
 * Set device link down: disable tx.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}

/*
 * Reset and stop device.
 */
static int
atl_dev_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = atl_dev_stop(dev);

	atl_free_queues(dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, dev);

	pthread_mutex_destroy(&hw->mbox_mutex);

	return ret;
}

static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = atl_dev_close(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}

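/*
 * Push the cached MACsec configuration (filled in by the atl_macsec_*
 * helpers below) to the FW over the mailbox: the global config first,
 * then the TX SC, RX SC, TX SA and RX SA, one request per message.
 */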
static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Create the SC/SA structures from the parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* a single TX SC is assumed, so index 0 */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */

	/*
	 * Create the SCI (Secure Channel Identifier).
	 * The SCI is constructed from the source MAC address and the
	 * port identifier.
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0; /* 0x3f */

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}

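/*
 * The atl_macsec_* helpers below only cache the MACsec parameters in
 * the per-port config; nothing reaches the FW until
 * atl_dev_configure_macsec() is run from the delayed alarm handler.
 * Keys are 128 bits (16 bytes).
 */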
int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

	return 0;
}

int atl_macsec_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

	return 0;
}

int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);

	return 0;
}

int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}

int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}

int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}

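/*
 * Basic stats combine two sources: DMA counters kept in hw->curr_stats
 * (refreshed from the FW) and per-queue SW counters maintained by the
 * RX/TX burst functions.
 */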
static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}

static int
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));

	return 0;
}

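/*
 * MACsec xstats are only counted when the FW advertises MACsec support
 * (CAPS_LO_MACSEC); the MSM counters are always reported.
 */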
static int
atl_dev_xstats_get_count(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;

	struct aq_hw_s *hw = &adapter->hw;
	unsigned int i, count = 0;

	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
			((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
			continue;

		count++;
	}

	return count;
}

static int
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned int size)
{
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (xstats_names) {
		for (i = 0; i < size && i < count; i++) {
			snprintf(xstats_names[i].name,
				RTE_ETH_XSTATS_NAME_SIZE, "%s",
				atl_xstats_tbl[i].name);
		}
	}

	return count;
}

static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (!stats)
		return count;

	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < count; i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (!err) {
				stats[i].value =
					*(u64 *)((uint8_t *)&resp.stats +
					atl_xstats_tbl[i].offset);
			}
			break;
		}
	}

	return i;
}

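/*
 * The FW version word is packed as major:8 | minor:8 | build:16.
 * Per the ethdev convention, the required buffer size (including the
 * terminating NUL) is returned when fw_size is too small.
 */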
static int
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
	int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	if (ret)
		return -EIO;

	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add string null-terminator */
	if (fw_size < (size_t)ret)
		return ret;

	return 0;
}

static int
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;

	return 0;
}

static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == atl_recv_pkts)
		return ptypes;

	return NULL;
}

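/*
 * Deferred work scheduled with rte_eal_alarm_set() from link update:
 * re-applies the cached MACsec configuration after a link change.
 */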
static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);
}

/* Returns 0 if the link status changed, -1 if it did not change */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* The driver has to update flow control settings on the RX block
	 * on any link event, so query the FW for the flow control mode it
	 * negotiated.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set failed");

	return 0;
}

static int
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);

	return 0;
}

static int
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);

	return 0;
}

static int
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);

	return 0;
}

static int
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);

	return 0;
}

/**
 * Clear the interrupt causes and enable the interrupt.
 * It is called only once, during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */

static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}

static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/**
 * Get and print the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, "Port %d: Link Down",
				(int)(dev->data->port_id));
	}

#ifdef DEBUG
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
#endif

	PMD_DRV_LOG(INFO, "Link speed: %d", link.link_speed);
}

/*
 * Execute link_update and notify the application after an interrupt
 * has occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify the application if the link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check whether MACsec keys have expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req failed");
			goto done;
		}
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);

	return 0;
}

/**
 * Interrupt handler triggered by the NIC for a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
atl_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}

static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}

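/*
 * SFP module EEPROM access goes through the FW over SMBus.
 * eeprom->magic selects the SMBus device address: 0 means the default
 * SMBUS_DEVICE_ID, and values up to 0x7F override it.
 */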
int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->get_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->set_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}

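/*
 * Flow control modes are translated between the ethdev RTE_FC_* values
 * and the FW's AQ_NIC_FC_RX/AQ_NIC_FC_TX bit mask.
 */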
static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fc = AQ_NIC_FC_OFF;

	if (hw->aq_fw_ops->get_flow_control == NULL)
		return -ENOTSUP;

	hw->aq_fw_ops->get_flow_control(hw, &fc);

	if (fc == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_FC_NONE;
	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
		fc_conf->mode = RTE_FC_FULL;
	else if (fc & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (fc & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_FC_TX_PAUSE;

	return 0;
}

static int
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	if (hw->aq_fw_ops->set_flow_control == NULL)
		return -ENOTSUP;

	if (fc_conf->mode == RTE_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);

	return 0;
}

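/*
 * Program one L2 unicast filter entry. The 6-byte MAC address is split
 * into a 16-bit MSW (bytes 0-1) and a 32-bit LSW (bytes 2-5), as
 * expected by the filter registers.
 */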
static int
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err;

	if (mac_addr) {
		h = (mac_addr[0] << 8) | (mac_addr[1]);
		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			(mac_addr[4] << 8) | mac_addr[5];
	}

	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	if (enable)
		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

	return err;
}

static int
atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
			uint32_t index __rte_unused, uint32_t pool __rte_unused)
{
	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
}

static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}

static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
	return 0;
}

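/*
 * max_rx_pktlen is compared against the full frame size, i.e. the MTU
 * plus the Ethernet header and CRC.
 */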
static int
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	int ret;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	ret = atl_dev_info_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}

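/*
 * The HW has HW_ATL_B0_MAX_VLAN_IDS filter slots, mirrored in
 * cfg->vlan_filter[]. Adding an ID claims a free slot and removing one
 * clears it; VLAN promiscuous mode is turned on whenever the table ends
 * up empty, so traffic is not dropped when no filters are set.
 */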
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* The VLAN_ID was not found, so there is nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* The VLAN_ID already exists, or was removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter slot for the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter slot left for the new VLAN_ID */
		err = -ENOMEM;
		goto exit;
	}

	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promiscuous mode if the vlan_filter table is empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}

static int
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
	}
	return 0;
}

static int
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	int i;

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	if (mask & ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
}

static int
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
		break;
	case ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
		err = -ENOTSUP;
	}

	return err;
}

static void
atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* valid queue ids are 0..nb_rx_queues-1 */
	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue id");
		return;
	}

	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
}

static int
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 i;

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
		return -EINVAL;

	/* Update whole uc filters table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;
		u32 l = 0, h = 0;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];
		}

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
	}

	return 0;
}

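/*
 * RSS redirection table: each entry is clamped to the number of
 * configured RX queues; the mask field of reta_conf is not consulted.
 */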
static int
atl_reta_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}

static int
atl_reta_query(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}

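/*
 * If the application does not provide an RSS key, fall back to the
 * driver's built-in default key below.
 */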
static int
atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
	} else {
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
	}

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
	return 0;
}

static int
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
	}

	return 0;
}

1899 
1900 static bool
1901 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1902 {
1903 	if (strcmp(dev->device->driver->name, drv->driver.name))
1904 		return false;
1905 
1906 	return true;
1907 }
1908 
1909 bool
1910 is_atlantic_supported(struct rte_eth_dev *dev)
1911 {
1912 	return is_device_supported(dev, &rte_atl_pmd);
1913 }
1914 
1915 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1916 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1917 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1918 RTE_LOG_REGISTER_SUFFIX(atl_logtype_init, init, NOTICE);
1919 RTE_LOG_REGISTER_SUFFIX(atl_logtype_driver, driver, NOTICE);
1920