xref: /dpdk/drivers/net/atlantic/atl_ethdev.c (revision c39d1e082a4b426e915074ce30eb6f410ee2654a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_alarm.h>
8 
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
12 #include "atl_logs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
16 
/* Device init/uninit (called from the PCI probe/remove path) */
static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);

/* Core device lifecycle ops */
static int  atl_dev_configure(struct rte_eth_dev *dev);
static int  atl_dev_start(struct rte_eth_dev *dev);
static void atl_dev_stop(struct rte_eth_dev *dev);
static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
static void atl_dev_close(struct rte_eth_dev *dev);
static int  atl_dev_reset(struct rte_eth_dev *dev);
static int  atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int  atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);

/* Statistics (basic + extended) */
static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);

static int atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN stuff */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
				     uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);

/* Regs */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);

/* MAC address management */
static int atl_add_mac_addr(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
					   struct rte_ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);

/* PCI bus glue */
static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

static int atl_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);

/* Dynamic log type IDs, registered at PMD load time. */
int atl_logtype_init;
int atl_logtype_driver;
125 
/*
 * The set of PCI devices this driver supports.
 * Terminated by a zeroed sentinel entry, as required by the PCI bus driver.
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	/* Engineering-sample / early device IDs */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	/* AQC10x family */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	/* "S" variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	/* "E" variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};
154 
/*
 * PCI driver descriptor: needs BAR mapping and supports link-status-change
 * interrupts (RTE_PCI_DRV_INTR_LSC).
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
161 
/* Rx offload capabilities advertised via dev_infos_get. */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

/* Tx offload capabilities advertised via dev_infos_get. */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

/* SFP module EEPROM size in bytes (256), reported by get_eeprom_length. */
#define SFP_EEPROM_SIZE 0x100

/* Rx descriptor ring constraints exported to applications. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

/* Tx descriptor ring constraints, including per-packet segment limits. */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
193 
/*
 * Source of an extended statistic: either the MAC/MSM hardware counters
 * (struct aq_stats_s) or the MACsec engine counters (struct macsec_stats).
 */
enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};

/* Table entry for an MSM counter: name + offset into struct aq_stats_s. */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

/* Table entry for a MACsec counter: name + offset into struct macsec_stats. */
#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

/* One row of the xstats lookup table. */
struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};
216 
/*
 * Extended statistics table. MSM entries come first, MACsec entries after;
 * atl_dev_xstats_get_count() relies on this ordering when MACsec is
 * unsupported (it simply excludes the trailing MACsec rows).
 */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};
265 
/* ethdev operations table wired into every atlantic port at init time. */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats: xstats_reset intentionally shares the basic reset handler */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	/* RSS */
	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
345 
/* Thin wrapper over the B0 hardware-layer reset; returns its status code. */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
351 
/* Unmask all interrupt causes (lower status word) for this port. */
static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}
359 
/* Mask all interrupt causes (lower status word) for this adapter. */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
366 
367 static int
368 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
369 {
370 	struct atl_adapter *adapter = eth_dev->data->dev_private;
371 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
372 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
373 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
374 	int err = 0;
375 
376 	PMD_INIT_FUNC_TRACE();
377 
378 	eth_dev->dev_ops = &atl_eth_dev_ops;
379 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
380 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
381 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
382 
383 	/* For secondary processes, the primary process has done all the work */
384 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
385 		return 0;
386 
387 	/* Vendor and Device ID need to be set before init of shared code */
388 	hw->device_id = pci_dev->id.device_id;
389 	hw->vendor_id = pci_dev->id.vendor_id;
390 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
391 
392 	/* Hardware configuration - hardcode */
393 	adapter->hw_cfg.is_lro = false;
394 	adapter->hw_cfg.wol = false;
395 	adapter->hw_cfg.is_rss = false;
396 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
397 
398 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
399 			  AQ_NIC_RATE_5G |
400 			  AQ_NIC_RATE_2G5 |
401 			  AQ_NIC_RATE_1G |
402 			  AQ_NIC_RATE_100M;
403 
404 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
405 	adapter->hw_cfg.aq_rss.indirection_table_size =
406 		HW_ATL_B0_RSS_REDIRECTION_MAX;
407 
408 	hw->aq_nic_cfg = &adapter->hw_cfg;
409 
410 	pthread_mutex_init(&hw->mbox_mutex, NULL);
411 
412 	/* disable interrupt */
413 	atl_disable_intr(hw);
414 
415 	/* Allocate memory for storing MAC addresses */
416 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
417 					RTE_ETHER_ADDR_LEN, 0);
418 	if (eth_dev->data->mac_addrs == NULL) {
419 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
420 		return -ENOMEM;
421 	}
422 
423 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
424 	if (err)
425 		return err;
426 
427 	/* Copy the permanent MAC address */
428 	if (hw->aq_fw_ops->get_mac_permanent(hw,
429 			eth_dev->data->mac_addrs->addr_bytes) != 0)
430 		return -EINVAL;
431 
432 	/* Reset the hw statistics */
433 	atl_dev_stats_reset(eth_dev);
434 
435 	rte_intr_callback_register(intr_handle,
436 				   atl_dev_interrupt_handler, eth_dev);
437 
438 	/* enable uio/vfio intr/eventfd mapping */
439 	rte_intr_enable(intr_handle);
440 
441 	/* enable support intr */
442 	atl_enable_intr(eth_dev);
443 
444 	return err;
445 }
446 
/*
 * Per-port teardown, invoked from the PCI remove path.
 * Mirrors eth_atl_dev_init(): stops the adapter if needed, unregisters the
 * interrupt handler, frees the MAC address storage and destroys the mailbox
 * mutex. Only the primary process may do this.
 */
static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	/* Secondary processes do not own hardware/shared state. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* Close (stop + free queues) first if the port is still running. */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	pthread_mutex_destroy(&hw->mbox_mutex);

	return 0;
}
480 
/*
 * PCI probe callback: allocates an ethdev with an atl_adapter private area
 * and runs eth_atl_dev_init() on it.
 */
static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}
488 
/* PCI remove callback: uninitializes and releases the ethdev. */
static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}
494 
/*
 * dev_configure callback. No hardware is touched here; the only action is
 * requesting a link-status refresh once the device is started.
 */
static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}
508 
509 /*
510  * Configure device link speed and setup link.
511  * It returns 0 on success.
512  */
513 static int
514 atl_dev_start(struct rte_eth_dev *dev)
515 {
516 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
517 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
518 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
519 	uint32_t intr_vector = 0;
520 	int status;
521 	int err;
522 
523 	PMD_INIT_FUNC_TRACE();
524 
525 	/* set adapter started */
526 	hw->adapter_stopped = 0;
527 
528 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
529 		PMD_INIT_LOG(ERR,
530 		"Invalid link_speeds for port %u, fix speed not supported",
531 				dev->data->port_id);
532 		return -EINVAL;
533 	}
534 
535 	/* disable uio/vfio intr/eventfd mapping */
536 	rte_intr_disable(intr_handle);
537 
538 	/* reinitialize adapter
539 	 * this calls reset and start
540 	 */
541 	status = atl_reset_hw(hw);
542 	if (status != 0)
543 		return -EIO;
544 
545 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
546 
547 	hw_atl_b0_hw_start(hw);
548 	/* check and configure queue intr-vector mapping */
549 	if ((rte_intr_cap_multiple(intr_handle) ||
550 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
551 	    dev->data->dev_conf.intr_conf.rxq != 0) {
552 		intr_vector = dev->data->nb_rx_queues;
553 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
554 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
555 					ATL_MAX_INTR_QUEUE_NUM);
556 			return -ENOTSUP;
557 		}
558 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
559 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
560 			return -1;
561 		}
562 	}
563 
564 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
565 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
566 				    dev->data->nb_rx_queues * sizeof(int), 0);
567 		if (intr_handle->intr_vec == NULL) {
568 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
569 				     " intr_vec", dev->data->nb_rx_queues);
570 			return -ENOMEM;
571 		}
572 	}
573 
574 	/* initialize transmission unit */
575 	atl_tx_init(dev);
576 
577 	/* This can fail when allocating mbufs for descriptor rings */
578 	err = atl_rx_init(dev);
579 	if (err) {
580 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
581 		goto error;
582 	}
583 
584 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
585 		hw->fw_ver_actual >> 24,
586 		(hw->fw_ver_actual >> 16) & 0xFF,
587 		hw->fw_ver_actual & 0xFFFF);
588 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
589 
590 	err = atl_start_queues(dev);
591 	if (err < 0) {
592 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
593 		goto error;
594 	}
595 
596 	err = atl_dev_set_link_up(dev);
597 
598 	err = hw->aq_fw_ops->update_link_status(hw);
599 
600 	if (err)
601 		goto error;
602 
603 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
604 
605 	if (rte_intr_allow_others(intr_handle)) {
606 		/* check if lsc interrupt is enabled */
607 		if (dev->data->dev_conf.intr_conf.lsc != 0)
608 			atl_dev_lsc_interrupt_setup(dev, true);
609 		else
610 			atl_dev_lsc_interrupt_setup(dev, false);
611 	} else {
612 		rte_intr_callback_unregister(intr_handle,
613 					     atl_dev_interrupt_handler, dev);
614 		if (dev->data->dev_conf.intr_conf.lsc != 0)
615 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
616 				     " no intr multiplex");
617 	}
618 
619 	/* check if rxq interrupt is enabled */
620 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
621 	    rte_intr_dp_is_en(intr_handle))
622 		atl_dev_rxq_interrupt_setup(dev);
623 
624 	/* enable uio/vfio intr/eventfd mapping */
625 	rte_intr_enable(intr_handle);
626 
627 	/* resume enabled intr since hw reset */
628 	atl_enable_intr(dev);
629 
630 	return 0;
631 
632 error:
633 	atl_stop_queues(dev);
634 	return -EIO;
635 }
636 
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 * Order matters: interrupts are masked before the hardware reset, and the
 * queues are stopped only after the NIC has been reset.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	/* atl_dev_start() unregisters the handler when MSI-X multiplexing
	 * is unavailable; re-register it so LSC events work after restart.
	 */
	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
681 
682 /*
683  * Set device link up: enable tx.
684  */
685 static int
686 atl_dev_set_link_up(struct rte_eth_dev *dev)
687 {
688 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
689 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
690 	uint32_t speed_mask = 0;
691 
692 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
693 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
694 	} else {
695 		if (link_speeds & ETH_LINK_SPEED_10G)
696 			speed_mask |= AQ_NIC_RATE_10G;
697 		if (link_speeds & ETH_LINK_SPEED_5G)
698 			speed_mask |= AQ_NIC_RATE_5G;
699 		if (link_speeds & ETH_LINK_SPEED_1G)
700 			speed_mask |= AQ_NIC_RATE_1G;
701 		if (link_speeds & ETH_LINK_SPEED_2_5G)
702 			speed_mask |=  AQ_NIC_RATE_2G5;
703 		if (link_speeds & ETH_LINK_SPEED_100M)
704 			speed_mask |= AQ_NIC_RATE_100M;
705 	}
706 
707 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
708 }
709 
/*
 * Set device link down: disable tx.
 * An empty speed mask tells the FW to take the link down.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}
720 
/*
 * Reset and stop device.
 * Stops the adapter (which also resets the NIC) and releases all queues.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}
733 
/*
 * dev_reset callback: full software reset implemented as an
 * uninit/init round-trip of the port.
 */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int rc = eth_atl_dev_uninit(dev);

	if (rc != 0)
		return rc;

	return eth_atl_dev_init(dev);
}
747 
/*
 * Push the MACsec configuration accumulated in the adapter config
 * (via the atl_macsec_* setters below) to the firmware.
 *
 * Sends five sequential FW requests: global config, TX SC, RX SC,
 * TX SA and RX SA. A no-op (returns 0) when MACsec is disabled or the
 * firmware does not expose send_macsec_req. Returns -1 on the first
 * request the firmware rejects.
 */
static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Creating set of sc/sa structures from parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* TXSC always one (??) */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX (stored right-aligned in two 32-bit words) */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index which currently used */

	/*
	 * Creating SCI (Secure Channel Identifier).
	 * SCI constructed from Source MAC and Port identifier
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	/* NOTE(review): DA matching (0x3f) was deliberately disabled here -
	 * confirm whether RX SC should match on destination MAC.
	 */
	msg_macsec.rxsc.da_mask = 0;

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA (key stored word-swapped for the FW) */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}
870 
871 int atl_macsec_enable(struct rte_eth_dev *dev,
872 		      uint8_t encr, uint8_t repl_prot)
873 {
874 	struct aq_hw_cfg_s *cfg =
875 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
876 
877 	cfg->aq_macsec.common.macsec_enabled = 1;
878 	cfg->aq_macsec.common.encryption_enabled = encr;
879 	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
880 
881 	return 0;
882 }
883 
884 int atl_macsec_disable(struct rte_eth_dev *dev)
885 {
886 	struct aq_hw_cfg_s *cfg =
887 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
888 
889 	cfg->aq_macsec.common.macsec_enabled = 0;
890 
891 	return 0;
892 }
893 
894 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
895 {
896 	struct aq_hw_cfg_s *cfg =
897 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
898 
899 	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
900 	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
901 		RTE_ETHER_ADDR_LEN);
902 
903 	return 0;
904 }
905 
906 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
907 			   uint8_t *mac, uint16_t pi)
908 {
909 	struct aq_hw_cfg_s *cfg =
910 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
911 
912 	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
913 	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
914 		RTE_ETHER_ADDR_LEN);
915 	cfg->aq_macsec.rxsc.pi = pi;
916 
917 	return 0;
918 }
919 
920 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
921 			   uint8_t idx, uint8_t an,
922 			   uint32_t pn, uint8_t *key)
923 {
924 	struct aq_hw_cfg_s *cfg =
925 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
926 
927 	cfg->aq_macsec.txsa.idx = idx;
928 	cfg->aq_macsec.txsa.pn = pn;
929 	cfg->aq_macsec.txsa.an = an;
930 
931 	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
932 	return 0;
933 }
934 
935 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
936 			   uint8_t idx, uint8_t an,
937 			   uint32_t pn, uint8_t *key)
938 {
939 	struct aq_hw_cfg_s *cfg =
940 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
941 
942 	cfg->aq_macsec.rxsa.idx = idx;
943 	cfg->aq_macsec.rxsa.pn = pn;
944 	cfg->aq_macsec.rxsa.an = an;
945 
946 	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
947 	return 0;
948 }
949 
/*
 * stats_get callback: refresh hardware counters from the FW mailbox, then
 * fill the rte_eth_stats structure from the hardware DMA counters plus the
 * software-maintained per-queue counters.
 */
static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	/* Pull the latest counter snapshot from firmware. */
	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	/* Per-queue counters are tracked in software by the rx/tx paths. */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}
981 
/*
 * stats_reset / xstats_reset callback: synchronize with firmware once,
 * then zero both the cached hardware snapshot and the software per-queue
 * counters so subsequent reads start from a clean baseline.
 */
static int
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));

	return 0;
}
997 
998 static int
999 atl_dev_xstats_get_count(struct rte_eth_dev *dev)
1000 {
1001 	struct atl_adapter *adapter =
1002 		(struct atl_adapter *)dev->data->dev_private;
1003 
1004 	struct aq_hw_s *hw = &adapter->hw;
1005 	unsigned int i, count = 0;
1006 
1007 	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
1008 		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
1009 			((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
1010 			continue;
1011 
1012 		count++;
1013 	}
1014 
1015 	return count;
1016 }
1017 
1018 static int
1019 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1020 			 struct rte_eth_xstat_name *xstats_names,
1021 			 unsigned int size)
1022 {
1023 	unsigned int i;
1024 	unsigned int count = atl_dev_xstats_get_count(dev);
1025 
1026 	if (xstats_names) {
1027 		for (i = 0; i < size && i < count; i++) {
1028 			snprintf(xstats_names[i].name,
1029 				RTE_ETH_XSTATS_NAME_SIZE, "%s",
1030 				atl_xstats_tbl[i].name);
1031 		}
1032 	}
1033 
1034 	return count;
1035 }
1036 
static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	/* ethdev convention: a NULL array means "return required count". */
	if (!stats)
		return count;

	/* Fetch all MACsec counters from firmware in one request; 0xff
	 * wildcards the SA/SC indexes.  If the request is unsupported or
	 * fails, err stays non-zero and MACsec values are left at zero.
	 */
	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	/* NOTE(review): atl_xstats_tbl is indexed by i while count may
	 * exclude MACsec entries; this assumes MACsec entries sit at the
	 * end of the table — confirm its layout.
	 */
	for (i = 0; i < n && i < count; i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			/* Value lives at a fixed byte offset in curr_stats. */
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (!err) {
				stats[i].value =
					*(u64 *)((uint8_t *)&resp.stats +
					atl_xstats_tbl[i].offset);
			}
			break;
		}
	}

	/* Number of entries actually filled. */
	return i;
}
1084 
1085 static int
1086 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1087 {
1088 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1089 	uint32_t fw_ver = 0;
1090 	unsigned int ret = 0;
1091 
1092 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1093 	if (ret)
1094 		return -EIO;
1095 
1096 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1097 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1098 
1099 	ret += 1; /* add string null-terminator */
1100 
1101 	if (fw_size < ret)
1102 		return ret;
1103 
1104 	return 0;
1105 }
1106 
1107 static int
1108 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1109 {
1110 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1111 
1112 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1113 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1114 
1115 	dev_info->min_rx_bufsize = 1024;
1116 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1117 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1118 	dev_info->max_vfs = pci_dev->max_vfs;
1119 
1120 	dev_info->max_hash_mac_addrs = 0;
1121 	dev_info->max_vmdq_pools = 0;
1122 	dev_info->vmdq_queue_num = 0;
1123 
1124 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1125 
1126 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1127 
1128 
1129 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1130 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1131 	};
1132 
1133 	dev_info->default_txconf = (struct rte_eth_txconf) {
1134 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1135 	};
1136 
1137 	dev_info->rx_desc_lim = rx_desc_lim;
1138 	dev_info->tx_desc_lim = tx_desc_lim;
1139 
1140 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1141 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1142 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1143 
1144 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1145 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1146 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1147 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
1148 
1149 	return 0;
1150 }
1151 
1152 static const uint32_t *
1153 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1154 {
1155 	static const uint32_t ptypes[] = {
1156 		RTE_PTYPE_L2_ETHER,
1157 		RTE_PTYPE_L2_ETHER_ARP,
1158 		RTE_PTYPE_L2_ETHER_VLAN,
1159 		RTE_PTYPE_L3_IPV4,
1160 		RTE_PTYPE_L3_IPV6,
1161 		RTE_PTYPE_L4_TCP,
1162 		RTE_PTYPE_L4_UDP,
1163 		RTE_PTYPE_L4_SCTP,
1164 		RTE_PTYPE_L4_ICMP,
1165 		RTE_PTYPE_UNKNOWN
1166 	};
1167 
1168 	if (dev->rx_pkt_burst == atl_recv_pkts)
1169 		return ptypes;
1170 
1171 	return NULL;
1172 }
1173 
static void
atl_dev_delayed_handler(void *param)
{
	/* Deferred work scheduled from link update: (re)apply MACsec
	 * configuration outside interrupt context.
	 */
	struct rte_eth_dev *dev = param;

	atl_dev_configure_macsec(dev);
}
1181 
1182 
1183 /* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	/* Start from a "link down" template. */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	/* NOTE(review): a firmware error reports "changed" (0) without
	 * updating the stored status — confirm this is intended.
	 */
	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	/* Firmware reports a non-zero speed: the link is up. */
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	/* Defer MACsec reconfiguration by one second via an EAL alarm. */
	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}
1239 
1240 static int
1241 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1242 {
1243 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1244 
1245 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1246 
1247 	return 0;
1248 }
1249 
1250 static int
1251 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1252 {
1253 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1254 
1255 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1256 
1257 	return 0;
1258 }
1259 
1260 static int
1261 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1262 {
1263 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1264 
1265 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1266 
1267 	return 0;
1268 }
1269 
1270 static int
1271 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1272 {
1273 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1274 
1275 	if (dev->data->promiscuous == 1)
1276 		return 0; /* must remain in all_multicast mode */
1277 
1278 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1279 
1280 	return 0;
1281 }
1282 
1283 /**
1284  * It clears the interrupt causes and enables the interrupt.
1285  * It will be called once only during nic initialized.
1286  *
1287  * @param dev
1288  *  Pointer to struct rte_eth_dev.
1289  * @param on
1290  *  Enable or Disable.
1291  *
1292  * @return
1293  *  - On success, zero.
1294  *  - On failure, a negative value.
1295  */
1296 
static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	/* Nothing to program in hardware; just report the current link
	 * state at setup time.
	 */
	atl_dev_link_status_print(dev);
	return 0;
}
1303 
static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	/* Rx queue interrupts require no extra setup; always succeed. */
	return 0;
}
1309 
1310 
1311 static int
1312 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1313 {
1314 	struct atl_interrupt *intr =
1315 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1316 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1317 	u64 cause = 0;
1318 
1319 	hw_atl_b0_hw_irq_read(hw, &cause);
1320 
1321 	atl_disable_intr(hw);
1322 
1323 	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
1324 		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
1325 
1326 	return 0;
1327 }
1328 
1329 /**
1330  * It gets and then prints the link status.
1331  *
1332  * @param dev
1333  *  Pointer to struct rte_eth_dev.
1334  *
1335  * @return
1336  *  - On success, zero.
1337  *  - On failure, a negative value.
1338  */
1339 static void
1340 atl_dev_link_status_print(struct rte_eth_dev *dev)
1341 {
1342 	struct rte_eth_link link;
1343 
1344 	memset(&link, 0, sizeof(link));
1345 	rte_eth_linkstatus_get(dev, &link);
1346 	if (link.link_status) {
1347 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1348 					(int)(dev->data->port_id),
1349 					(unsigned int)link.link_speed,
1350 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1351 					"full-duplex" : "half-duplex");
1352 	} else {
1353 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
1354 				(int)(dev->data->port_id));
1355 	}
1356 
1357 
1358 #ifdef DEBUG
1359 {
1360 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1361 
1362 	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1363 				pci_dev->addr.domain,
1364 				pci_dev->addr.bus,
1365 				pci_dev->addr.devid,
1366 				pci_dev->addr.function);
1367 }
1368 #endif
1369 
1370 	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1371 }
1372 
1373 /*
1374  * It executes link_update after knowing an interrupt occurred.
1375  *
1376  * @param dev
1377  *  Pointer to struct rte_eth_dev.
1378  *
1379  * @return
1380  *  - On success, zero.
1381  *  - On failure, a negative value.
1382  */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	/* Only link events are serviced here. */
	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		/* Link state unchanged: use the event to poll firmware
		 * for MACsec key-expiry conditions instead.
		 */
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check macsec Keys expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req fail");
			goto done;
		}
		/* Raise a MACsec event if firmware reports any threshold
		 * crossing or hard key expiry.
		 */
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			_rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	/* Unmask interrupts (disabled in interrupt_get_status) and ack. */
	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);

	return 0;
}
1437 
1438 /**
1439  * Interrupt handler triggered by NIC  for handling
1440  * specific interrupt.
1441  *
1442  * @param handle
1443  *  Pointer to interrupt handle.
1444  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
1446  *
1447  * @return
1448  *  void
1449  */
1450 static void
1451 atl_dev_interrupt_handler(void *param)
1452 {
1453 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1454 
1455 	atl_dev_interrupt_get_status(dev);
1456 	atl_dev_interrupt_action(dev, dev->intr_handle);
1457 }
1458 
1459 
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	/* SFP module EEPROMs have a fixed size on this device. */
	return SFP_EEPROM_SIZE;
}
1465 
1466 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1467 		       struct rte_dev_eeprom_info *eeprom)
1468 {
1469 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1470 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1471 
1472 	if (hw->aq_fw_ops->get_eeprom == NULL)
1473 		return -ENOTSUP;
1474 
1475 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1476 	    eeprom->data == NULL)
1477 		return -EINVAL;
1478 
1479 	if (eeprom->magic > 0x7F)
1480 		return -EINVAL;
1481 
1482 	if (eeprom->magic)
1483 		dev_addr = eeprom->magic;
1484 
1485 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1486 					 eeprom->length, eeprom->offset);
1487 }
1488 
1489 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1490 		       struct rte_dev_eeprom_info *eeprom)
1491 {
1492 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1493 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1494 
1495 	if (hw->aq_fw_ops->set_eeprom == NULL)
1496 		return -ENOTSUP;
1497 
1498 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1499 	    eeprom->data == NULL)
1500 		return -EINVAL;
1501 
1502 	if (eeprom->magic > 0x7F)
1503 		return -EINVAL;
1504 
1505 	if (eeprom->magic)
1506 		dev_addr = eeprom->magic;
1507 
1508 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1509 					 eeprom->length, eeprom->offset);
1510 }
1511 
static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	/* NULL data is a size query per the ethdev regs API. */
	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}
1537 
1538 static int
1539 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1540 {
1541 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1542 	u32 fc = AQ_NIC_FC_OFF;
1543 
1544 	if (hw->aq_fw_ops->get_flow_control == NULL)
1545 		return -ENOTSUP;
1546 
1547 	hw->aq_fw_ops->get_flow_control(hw, &fc);
1548 
1549 	if (fc == AQ_NIC_FC_OFF)
1550 		fc_conf->mode = RTE_FC_NONE;
1551 	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
1552 		fc_conf->mode = RTE_FC_FULL;
1553 	else if (fc & AQ_NIC_FC_RX)
1554 		fc_conf->mode = RTE_FC_RX_PAUSE;
1555 	else if (fc & AQ_NIC_FC_TX)
1556 		fc_conf->mode = RTE_FC_TX_PAUSE;
1557 
1558 	return 0;
1559 }
1560 
1561 static int
1562 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1563 {
1564 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1565 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1566 
1567 
1568 	if (hw->aq_fw_ops->set_flow_control == NULL)
1569 		return -ENOTSUP;
1570 
1571 	if (fc_conf->mode == RTE_FC_NONE)
1572 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1573 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1574 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1575 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1576 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1577 	else if (fc_conf->mode == RTE_FC_FULL)
1578 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1579 
1580 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1581 		return hw->aq_fw_ops->set_flow_control(hw);
1582 
1583 	return 0;
1584 }
1585 
1586 static int
1587 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1588 		    u8 *mac_addr, bool enable)
1589 {
1590 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1591 	unsigned int h = 0U;
1592 	unsigned int l = 0U;
1593 	int err;
1594 
1595 	if (mac_addr) {
1596 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1597 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1598 			(mac_addr[4] << 8) | mac_addr[5];
1599 	}
1600 
1601 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1602 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1603 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1604 
1605 	if (enable)
1606 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1607 
1608 	err = aq_hw_err_from_flags(hw);
1609 
1610 	return err;
1611 }
1612 
static int
atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
			uint32_t index __rte_unused, uint32_t pool __rte_unused)
{
	/* An all-zero MAC can never be a valid unicast filter entry. */
	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	/* Program and enable the unicast filter at the given slot. */
	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
}
1624 
static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	/* NULL address clears the slot and disables its filter. */
	atl_update_mac_addr(dev, index, NULL, false);
}
1630 
/* Replace the primary MAC filter (slot 0) with a new address. */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	/* Propagate failures (e.g. -EINVAL for a zero address); the old
	 * code discarded this result and always reported success.
	 */
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1638 
1639 static int
1640 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1641 {
1642 	struct rte_eth_dev_info dev_info;
1643 	int ret;
1644 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1645 
1646 	ret = atl_dev_info_get(dev, &dev_info);
1647 	if (ret != 0)
1648 		return ret;
1649 
1650 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1651 		return -EINVAL;
1652 
1653 	/* update max frame size */
1654 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1655 
1656 	return 0;
1657 }
1658 
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Look for an existing filter slot holding this VLAN ID. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exist, or already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to found free VLAN filter to add new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID*/
		err = -ENOMEM;
		goto exit;
	}

	/* Program the new VLAN ID into the free slot and enable it. */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter empty  */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
1719 
1720 static int
1721 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1722 {
1723 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1724 	struct aq_hw_cfg_s *cfg =
1725 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1726 	int i;
1727 
1728 	PMD_INIT_FUNC_TRACE();
1729 
1730 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1731 		if (cfg->vlan_filter[i])
1732 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1733 	}
1734 	return 0;
1735 }
1736 
1737 static int
1738 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1739 {
1740 	struct aq_hw_cfg_s *cfg =
1741 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1742 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1743 	int ret = 0;
1744 	int i;
1745 
1746 	PMD_INIT_FUNC_TRACE();
1747 
1748 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1749 
1750 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1751 
1752 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1753 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1754 
1755 	if (mask & ETH_VLAN_EXTEND_MASK)
1756 		ret = -ENOTSUP;
1757 
1758 	return ret;
1759 }
1760 
1761 static int
1762 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1763 		  uint16_t tpid)
1764 {
1765 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1766 	int err = 0;
1767 
1768 	PMD_INIT_FUNC_TRACE();
1769 
1770 	switch (vlan_type) {
1771 	case ETH_VLAN_TYPE_INNER:
1772 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1773 		break;
1774 	case ETH_VLAN_TYPE_OUTER:
1775 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1776 		break;
1777 	default:
1778 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1779 		err = -ENOTSUP;
1780 	}
1781 
1782 	return err;
1783 }
1784 
1785 static void
1786 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1787 {
1788 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1789 
1790 	PMD_INIT_FUNC_TRACE();
1791 
1792 	if (queue_id > dev->data->nb_rx_queues) {
1793 		PMD_DRV_LOG(ERR, "Invalid queue id");
1794 		return;
1795 	}
1796 
1797 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1798 }
1799 
1800 static int
1801 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1802 			  struct rte_ether_addr *mc_addr_set,
1803 			  uint32_t nb_mc_addr)
1804 {
1805 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1806 	u32 i;
1807 
1808 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1809 		return -EINVAL;
1810 
1811 	/* Update whole uc filters table */
1812 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1813 		u8 *mac_addr = NULL;
1814 		u32 l = 0, h = 0;
1815 
1816 		if (i < nb_mc_addr) {
1817 			mac_addr = mc_addr_set[i].addr_bytes;
1818 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1819 				(mac_addr[4] << 8) | mac_addr[5];
1820 			h = (mac_addr[0] << 8) | mac_addr[1];
1821 		}
1822 
1823 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1824 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1825 							HW_ATL_B0_MAC_MIN + i);
1826 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1827 							HW_ATL_B0_MAC_MIN + i);
1828 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1829 					   HW_ATL_B0_MAC_MIN + i);
1830 	}
1831 
1832 	return 0;
1833 }
1834 
1835 static int
1836 atl_reta_update(struct rte_eth_dev *dev,
1837 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1838 		   uint16_t reta_size)
1839 {
1840 	int i;
1841 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1842 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1843 
1844 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1845 		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1846 					dev->data->nb_rx_queues - 1);
1847 
1848 	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1849 	return 0;
1850 }
1851 
1852 static int
1853 atl_reta_query(struct rte_eth_dev *dev,
1854 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1855 		    uint16_t reta_size)
1856 {
1857 	int i;
1858 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1859 
1860 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1861 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1862 	reta_conf->mask = ~0U;
1863 	return 0;
1864 }
1865 
1866 static int
1867 atl_rss_hash_update(struct rte_eth_dev *dev,
1868 				 struct rte_eth_rss_conf *rss_conf)
1869 {
1870 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1871 	struct aq_hw_cfg_s *cfg =
1872 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1873 	static u8 def_rss_key[40] = {
1874 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1875 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1876 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1877 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1878 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1879 	};
1880 
1881 	cfg->is_rss = !!rss_conf->rss_hf;
1882 	if (rss_conf->rss_key) {
1883 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1884 		       rss_conf->rss_key_len);
1885 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1886 	} else {
1887 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1888 		       sizeof(def_rss_key));
1889 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1890 	}
1891 
1892 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1893 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1894 	return 0;
1895 }
1896 
1897 static int
1898 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1899 				 struct rte_eth_rss_conf *rss_conf)
1900 {
1901 	struct aq_hw_cfg_s *cfg =
1902 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1903 
1904 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1905 	if (rss_conf->rss_key) {
1906 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1907 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1908 		       rss_conf->rss_key_len);
1909 	}
1910 
1911 	return 0;
1912 }
1913 
1914 static bool
1915 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1916 {
1917 	if (strcmp(dev->device->driver->name, drv->driver.name))
1918 		return false;
1919 
1920 	return true;
1921 }
1922 
bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	/* True when the port is bound to this (atlantic) PMD. */
	return is_device_supported(dev, &rte_atl_pmd);
}
1928 
1929 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1930 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1931 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1932 
RTE_INIT(atl_init_log)
{
	/* Register the PMD's log types; default verbosity is NOTICE. */
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}
1942