/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_string_fns.h>
#include <ethdev_pci.h>
#include <rte_alarm.h>

#include "atl_ethdev.h"
#include "atl_common.h"
#include "atl_hw_regs.h"
#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int  atl_dev_configure(struct rte_eth_dev *dev);
static int  atl_dev_start(struct rte_eth_dev *dev);
static int atl_dev_stop(struct rte_eth_dev *dev);
static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
static int  atl_dev_close(struct rte_eth_dev *dev);
static int  atl_dev_reset(struct rte_eth_dev *dev);
static int  atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int  atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);

static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);

static int atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN stuff */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
				     uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);

/* Regs */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);


static int atl_add_mac_addr(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
					   struct rte_ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);


static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

static int atl_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};

#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
			| RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
			| RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
			| RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
			| RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
			| RTE_ETH_RX_OFFLOAD_VLAN_FILTER)

#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
			| RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
			| RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
			| RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
			| RTE_ETH_TX_OFFLOAD_TCP_TSO \
			| RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
			| RTE_ETH_TX_OFFLOAD_MULTI_SEGS)

#define SFP_EEPROM_SIZE 0x100

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};

enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};

#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};

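/*
 * For example, ATL_XSTATS_FIELD(uprc) expands to the initializer
 * { "uprc", offsetof(struct aq_stats_s, uprc), XSTATS_TYPE_MSM },
 * i.e. one struct atl_xstats_tbl_s entry per hardware counter.
 */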
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};

static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};

static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}

static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}

static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}

static int
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
{
	struct atl_adapter *adapter = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;

	eth_dev->rx_queue_count       = atl_rx_queue_count;
	eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;

	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration - hardcoded defaults */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M;

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	pthread_mutex_init(&hw->mbox_mutex, NULL);

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
					RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
		return -ENOMEM;
	}

	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err)
		return err;

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)
		return -EINVAL;

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	atl_enable_intr(eth_dev);

	return err;
}

static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}

static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, atl_dev_close);
}

static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
atl_dev_start(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int status;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fix speed not supported",
				dev->data->port_id);
		return -EINVAL;
	}

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = atl_reset_hw(hw);
	if (status != 0)
		return -EIO;

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
	if (err)
		return -EIO;

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					ATL_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
			return -1;
		}
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* initialize transmission unit */
	atl_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		hw->fw_ver_actual >> 24,
		(hw->fw_ver_actual >> 16) & 0xFF,
		hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = atl_dev_set_link_up(dev);
	if (err)
		goto error;

	err = hw->aq_fw_ops->update_link_status(hw);
	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
		else
			atl_dev_lsc_interrupt_setup(dev, false);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "LSC interrupt is disabled:"
				     " no interrupt multiplexing support");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);

	return 0;

error:
	atl_stop_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	return 0;
}

/*
 * Set device link up: (re)start link negotiation with the configured
 * speed mask.
 */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;
	} else {
		if (link_speeds & RTE_ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
		if (link_speeds & RTE_ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
		if (link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
		if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
		if (link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;
	}

	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}
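/*
 * For example (hypothetical application configuration): setting
 * dev_conf.link_speeds = RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_1G
 * before rte_eth_dev_start() makes the mapping above advertise
 * AQ_NIC_RATE_10G | AQ_NIC_RATE_1G to the firmware.
 */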

/*
 * Set device link down: request a zero speed mask from the firmware,
 * bringing the link down.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}

/*
 * Reset and stop device.
 */
static int
atl_dev_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = atl_dev_stop(dev);

	atl_free_queues(dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, dev);

	pthread_mutex_destroy(&hw->mbox_mutex);

	return ret;
}

static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = atl_dev_close(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}

static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Create the set of SC/SA structures from parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* only a single TX SC is supported */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */

	/*
	 * Create the SCI (Secure Channel Identifier). Per IEEE 802.1AE,
	 * the SCI is the 48-bit source MAC address followed by the
	 * 16-bit port identifier.
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
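	/*
	 * Illustrative example (hypothetical values): for source MAC
	 * 00:17:b6:00:00:01 and port identifier 1, the 64-bit SCI is
	 * 0x0017b60000010001, stored here as sci[1] = 0x0017b600 (upper
	 * 32 bits) and sci[0] = 0x00010001 (lower 32 bits).
	 */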

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0; /* was 0x3f */

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}

int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

	return 0;
}

int atl_macsec_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

	return 0;
}

int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);

	return 0;
}

int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}

int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}

int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}
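/*
 * These atl_macsec_* helpers back the experimental per-PMD MACsec API
 * declared in rte_pmd_atlantic.h. A minimal usage sketch (hypothetical
 * port_id, peer MAC `mac` and 128-bit key `key`):
 *
 *   rte_pmd_atl_macsec_config_txsc(port_id, mac);
 *   rte_pmd_atl_macsec_config_rxsc(port_id, mac, 1);
 *   rte_pmd_atl_macsec_select_txsa(port_id, 0, 0, 1, key);
 *   rte_pmd_atl_macsec_select_rxsa(port_id, 0, 0, 1, key);
 *   rte_pmd_atl_macsec_enable(port_id, 1, 1);
 *
 * The accumulated settings are pushed to the firmware by
 * atl_dev_configure_macsec() on the next link event.
 */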

static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}

static int
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));

	return 0;
}

static int
atl_dev_xstats_get_count(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;

	struct aq_hw_s *hw = &adapter->hw;
	unsigned int i, count = 0;

	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
			((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
			continue;

		count++;
	}

	return count;
}

static int
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned int size)
{
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (xstats_names) {
		for (i = 0; i < size && i < count; i++) {
			snprintf(xstats_names[i].name,
				RTE_ETH_XSTATS_NAME_SIZE, "%s",
				atl_xstats_tbl[i].name);
		}
	}

	return count;
}

static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (!stats)
		return count;

	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < count; i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (!err) {
				stats[i].value =
					*(u64 *)((uint8_t *)&resp.stats +
					atl_xstats_tbl[i].offset);
			}
			break;
		}
	}

	return i;
}
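/*
 * Usage sketch (hypothetical port_id): the usual ethdev two-call
 * pattern that ends up in the handlers above - query the count first,
 * then fetch the values:
 *
 *   int n = rte_eth_xstats_get(port_id, NULL, 0);
 *   struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *
 *   if (xs != NULL)
 *           rte_eth_xstats_get(port_id, xs, n);
 */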

static int
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
	int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	if (ret)
		return -EIO;

	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add string null-terminator */
	if (fw_size < (size_t)ret)
		return ret;

	return 0;
}
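/*
 * Worked example (hypothetical register value): a packed firmware
 * version of 0x0301005B decodes as major = 0x03, minor = 0x01,
 * build = 0x005B, i.e. the string "3.1.91".
 */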

static int
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;

	return 0;
}

static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == atl_recv_pkts)
		return ptypes;

	return NULL;
}

static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);
}


/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ?
		RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set failed");

	return 0;
}

static int
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);

	return 0;
}

static int
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);

	return 0;
}

static int
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);

	return 0;
}

static int
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}

static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}


static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, "Port %d: Link Down",
				(int)(dev->data->port_id));
	}


#ifdef DEBUG
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
#endif

	PMD_DRV_LOG(INFO, "Link speed: %u", link.link_speed);
}

/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check whether MACsec keys have expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req failed");
			goto done;
		}
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);

	return 0;
}

/**
 * Interrupt handler triggered by the NIC for handling
 * a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
atl_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}


static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}

int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->get_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}
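/*
 * Usage sketch (hypothetical port_id and buffer): reading the full SFP
 * EEPROM page through the generic ethdev API, which dispatches here.
 * A magic of 0 keeps the default SMBUS_DEVICE_ID; a non-zero value up
 * to 0x7F overrides the SMBus device address:
 *
 *   uint8_t buf[256];
 *   struct rte_dev_eeprom_info info = {
 *           .data = buf, .offset = 0, .length = sizeof(buf), .magic = 0,
 *   };
 *
 *   rte_eth_dev_get_eeprom(port_id, &info);
 */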

int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->set_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}

static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fc = AQ_NIC_FC_OFF;

	if (hw->aq_fw_ops->get_flow_control == NULL)
		return -ENOTSUP;

	hw->aq_fw_ops->get_flow_control(hw, &fc);

	if (fc == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_ETH_FC_NONE;
	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (fc & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else if (fc & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;

	return 0;
}

static int
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	if (hw->aq_fw_ops->set_flow_control == NULL)
		return -ENOTSUP;

	if (fc_conf->mode == RTE_ETH_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_ETH_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);

	return 0;
}
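/*
 * Usage sketch (hypothetical port_id): enabling symmetric flow control
 * through the generic ethdev call that lands in atl_flow_ctrl_set():
 *
 *   struct rte_eth_fc_conf fc = { .mode = RTE_ETH_FC_FULL };
 *
 *   rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */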

static int
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err;

	if (mac_addr) {
		h = (mac_addr[0] << 8) | (mac_addr[1]);
		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			(mac_addr[4] << 8) | mac_addr[5];
	}

	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	if (enable)
		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

	return err;
}
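/*
 * For example (hypothetical address): MAC 00:17:b6:01:02:03 is split
 * above into the two filter registers as h = 0x0017 (bytes 0-1) and
 * l = 0xb6010203 (bytes 2-5).
 */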

static int
atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
			uint32_t index __rte_unused, uint32_t pool __rte_unused)
{
	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
}

static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}

static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
	return 0;
}

static int
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	int ret;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	ret = atl_dev_info_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

	return 0;
}

static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found, so there is nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exists, or was removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter entry for the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter entry to add the new VLAN_ID */
		err = -ENOMEM;
		goto exit;
	}

	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promiscuous mode if the VLAN filter table is empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
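/*
 * Usage sketch (hypothetical port_id): adding and removing a VLAN
 * filter entry through the generic ethdev API, which dispatches to
 * atl_vlan_filter_set():
 *
 *   rte_eth_dev_vlan_filter(port_id, 100, 1);  // accept VLAN 100
 *   rte_eth_dev_vlan_filter(port_id, 100, 0);  // remove it again
 */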

static int
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
	}
	return 0;
}

static int
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	int i;

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	if (mask & RTE_ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
}

static int
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
		err = -ENOTSUP;
	}

	return err;
}

static void
atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue id");
		return;
	}

	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
}

static int
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 i;

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
		return -EINVAL;

	/* Update whole uc filters table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;
		u32 l = 0, h = 0;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];
		}

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
	}

	return 0;
}

static int
atl_reta_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}
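/*
 * Usage sketch (hypothetical port_id): spreading all 64 redirection
 * table entries across the first two RX queues via the generic ethdev
 * API, which dispatches to atl_reta_update():
 *
 *   struct rte_eth_rss_reta_entry64 reta = { .mask = UINT64_MAX };
 *   int i;
 *
 *   for (i = 0; i < RTE_ETH_RETA_GROUP_SIZE; i++)
 *           reta.reta[i] = i % 2;
 *   rte_eth_dev_rss_reta_update(port_id, &reta, RTE_ETH_RETA_GROUP_SIZE);
 */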

static int
atl_reta_query(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}

static int
atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		/* Guard against overrunning the 40-byte hardware key */
		if (rss_conf->rss_key_len >
		    sizeof(cfg->aq_rss.hash_secret_key))
			return -EINVAL;
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
	} else {
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
	}

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
	return 0;
}

static int
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
	}

	return 0;
}

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}

RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
RTE_LOG_REGISTER_SUFFIX(atl_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(atl_logtype_driver, driver, NOTICE);