1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_alarm.h>
8 
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
12 #include "atl_logs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
16 
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
19 
20 static int  atl_dev_configure(struct rte_eth_dev *dev);
21 static int  atl_dev_start(struct rte_eth_dev *dev);
22 static void atl_dev_stop(struct rte_eth_dev *dev);
23 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
24 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
25 static void atl_dev_close(struct rte_eth_dev *dev);
26 static int  atl_dev_reset(struct rte_eth_dev *dev);
27 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
28 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
29 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
30 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
31 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
32 
33 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
34 				    struct rte_eth_xstat_name *xstats_names,
35 				    unsigned int size);
36 
37 static int atl_dev_stats_get(struct rte_eth_dev *dev,
38 				struct rte_eth_stats *stats);
39 
40 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
41 			      struct rte_eth_xstat *stats, unsigned int n);
42 
43 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
44 
45 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 			      size_t fw_size);
47 
48 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
49 
50 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
51 
52 /* VLAN stuff */
53 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
54 		uint16_t vlan_id, int on);
55 
56 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
57 
58 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
59 				     uint16_t queue_id, int on);
60 
61 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
62 			     enum rte_vlan_type vlan_type, uint16_t tpid);
63 
64 /* EEPROM */
65 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
66 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
67 			      struct rte_dev_eeprom_info *eeprom);
68 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
69 			      struct rte_dev_eeprom_info *eeprom);
70 
71 /* Regs */
72 static int atl_dev_get_regs(struct rte_eth_dev *dev,
73 			    struct rte_dev_reg_info *regs);
74 
75 /* Flow control */
76 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
77 			       struct rte_eth_fc_conf *fc_conf);
78 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
79 			       struct rte_eth_fc_conf *fc_conf);
80 
81 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
82 
83 /* Interrupts */
84 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
85 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
86 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
87 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
88 				    struct rte_intr_handle *handle);
89 static void atl_dev_interrupt_handler(void *param);
90 
91 
92 static int atl_add_mac_addr(struct rte_eth_dev *dev,
93 			    struct rte_ether_addr *mac_addr,
94 			    uint32_t index, uint32_t pool);
95 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
96 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
97 					   struct rte_ether_addr *mac_addr);
98 
99 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
100 				    struct rte_ether_addr *mc_addr_set,
101 				    uint32_t nb_mc_addr);
102 
103 /* RSS */
104 static int atl_reta_update(struct rte_eth_dev *dev,
105 			     struct rte_eth_rss_reta_entry64 *reta_conf,
106 			     uint16_t reta_size);
107 static int atl_reta_query(struct rte_eth_dev *dev,
108 			    struct rte_eth_rss_reta_entry64 *reta_conf,
109 			    uint16_t reta_size);
110 static int atl_rss_hash_update(struct rte_eth_dev *dev,
111 				 struct rte_eth_rss_conf *rss_conf);
112 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
113 				   struct rte_eth_rss_conf *rss_conf);
114 
115 
116 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
117 	struct rte_pci_device *pci_dev);
118 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
119 
120 static int atl_dev_info_get(struct rte_eth_dev *dev,
121 				struct rte_eth_dev_info *dev_info);
122 
123 int atl_logtype_init;
124 int atl_logtype_driver;
125 
126 /*
127  * The set of PCI devices this driver supports
128  */
129 static const struct rte_pci_id pci_id_atl_map[] = {
130 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
131 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
132 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
133 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
134 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
135 
136 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
137 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
138 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
139 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
140 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
141 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
142 
143 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
144 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
145 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
146 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
147 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
148 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
149 
150 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
151 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
152 	{ .vendor_id = 0, /* sentinel */ },
153 };
154 
155 static struct rte_pci_driver rte_atl_pmd = {
156 	.id_table = pci_id_atl_map,
157 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
158 	.probe = eth_atl_pci_probe,
159 	.remove = eth_atl_pci_remove,
160 };
161 
162 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
163 			| DEV_RX_OFFLOAD_IPV4_CKSUM \
164 			| DEV_RX_OFFLOAD_UDP_CKSUM \
165 			| DEV_RX_OFFLOAD_TCP_CKSUM \
166 			| DEV_RX_OFFLOAD_JUMBO_FRAME \
167 			| DEV_RX_OFFLOAD_MACSEC_STRIP \
168 			| DEV_RX_OFFLOAD_VLAN_FILTER)
169 
170 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
171 			| DEV_TX_OFFLOAD_IPV4_CKSUM \
172 			| DEV_TX_OFFLOAD_UDP_CKSUM \
173 			| DEV_TX_OFFLOAD_TCP_CKSUM \
174 			| DEV_TX_OFFLOAD_TCP_TSO \
175 			| DEV_TX_OFFLOAD_MACSEC_INSERT \
176 			| DEV_TX_OFFLOAD_MULTI_SEGS)
177 
178 #define SFP_EEPROM_SIZE 0x100
179 
180 static const struct rte_eth_desc_lim rx_desc_lim = {
181 	.nb_max = ATL_MAX_RING_DESC,
182 	.nb_min = ATL_MIN_RING_DESC,
183 	.nb_align = ATL_RXD_ALIGN,
184 };
185 
186 static const struct rte_eth_desc_lim tx_desc_lim = {
187 	.nb_max = ATL_MAX_RING_DESC,
188 	.nb_min = ATL_MIN_RING_DESC,
189 	.nb_align = ATL_TXD_ALIGN,
190 	.nb_seg_max = ATL_TX_MAX_SEG,
191 	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
192 };
193 
194 enum atl_xstats_type {
195 	XSTATS_TYPE_MSM = 0,
196 	XSTATS_TYPE_MACSEC,
197 };
198 
199 #define ATL_XSTATS_FIELD(name) { \
200 	#name, \
201 	offsetof(struct aq_stats_s, name), \
202 	XSTATS_TYPE_MSM \
203 }
204 
205 #define ATL_MACSEC_XSTATS_FIELD(name) { \
206 	#name, \
207 	offsetof(struct macsec_stats, name), \
208 	XSTATS_TYPE_MACSEC \
209 }
210 
211 struct atl_xstats_tbl_s {
212 	const char *name;
213 	unsigned int offset;
214 	enum atl_xstats_type type;
215 };
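/*
 * Illustration (editorial, not part of the driver): the macro above
 * expands to a plain initializer, e.g. ATL_XSTATS_FIELD(uprc) becomes
 *
 *	{ "uprc", offsetof(struct aq_stats_s, uprc), XSTATS_TYPE_MSM }
 *
 * atl_dev_xstats_get() adds the recorded offset to the base of the
 * statistics block and reads the 64-bit counter found there.
 */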
216 
217 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
218 	ATL_XSTATS_FIELD(uprc),
219 	ATL_XSTATS_FIELD(mprc),
220 	ATL_XSTATS_FIELD(bprc),
221 	ATL_XSTATS_FIELD(erpt),
222 	ATL_XSTATS_FIELD(uptc),
223 	ATL_XSTATS_FIELD(mptc),
224 	ATL_XSTATS_FIELD(bptc),
225 	ATL_XSTATS_FIELD(erpr),
226 	ATL_XSTATS_FIELD(ubrc),
227 	ATL_XSTATS_FIELD(ubtc),
228 	ATL_XSTATS_FIELD(mbrc),
229 	ATL_XSTATS_FIELD(mbtc),
230 	ATL_XSTATS_FIELD(bbrc),
231 	ATL_XSTATS_FIELD(bbtc),
232 	/* Ingress Common Counters */
233 	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
234 	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
235 	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
236 	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
237 	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
238 	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
239 	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
240 	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
241 	/* Ingress SA Counters */
242 	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
243 	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
244 	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
245 	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
246 	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
247 	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
248 	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
249 	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
250 	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
251 	/* Egress Common Counters */
252 	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
253 	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
254 	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
255 	ATL_MACSEC_XSTATS_FIELD(out_too_long),
256 	/* Egress SC Counters */
257 	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
258 	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
259 	/* Egress SA Counters */
260 	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
261 	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
262 	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
263 	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
264 };
265 
266 static const struct eth_dev_ops atl_eth_dev_ops = {
267 	.dev_configure	      = atl_dev_configure,
268 	.dev_start	      = atl_dev_start,
269 	.dev_stop	      = atl_dev_stop,
270 	.dev_set_link_up      = atl_dev_set_link_up,
271 	.dev_set_link_down    = atl_dev_set_link_down,
272 	.dev_close	      = atl_dev_close,
273 	.dev_reset	      = atl_dev_reset,
274 
275 	/* PROMISC */
276 	.promiscuous_enable   = atl_dev_promiscuous_enable,
277 	.promiscuous_disable  = atl_dev_promiscuous_disable,
278 	.allmulticast_enable  = atl_dev_allmulticast_enable,
279 	.allmulticast_disable = atl_dev_allmulticast_disable,
280 
281 	/* Link */
282 	.link_update	      = atl_dev_link_update,
283 
284 	.get_reg              = atl_dev_get_regs,
285 
286 	/* Stats */
287 	.stats_get	      = atl_dev_stats_get,
288 	.xstats_get	      = atl_dev_xstats_get,
289 	.xstats_get_names     = atl_dev_xstats_get_names,
290 	.stats_reset	      = atl_dev_stats_reset,
291 	.xstats_reset	      = atl_dev_stats_reset,
292 
293 	.fw_version_get       = atl_fw_version_get,
294 	.dev_infos_get	      = atl_dev_info_get,
295 	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
296 
297 	.mtu_set              = atl_dev_mtu_set,
298 
299 	/* VLAN */
300 	.vlan_filter_set      = atl_vlan_filter_set,
301 	.vlan_offload_set     = atl_vlan_offload_set,
302 	.vlan_tpid_set        = atl_vlan_tpid_set,
303 	.vlan_strip_queue_set = atl_vlan_strip_queue_set,
304 
305 	/* Queue Control */
306 	.rx_queue_start	      = atl_rx_queue_start,
307 	.rx_queue_stop	      = atl_rx_queue_stop,
308 	.rx_queue_setup       = atl_rx_queue_setup,
309 	.rx_queue_release     = atl_rx_queue_release,
310 
311 	.tx_queue_start	      = atl_tx_queue_start,
312 	.tx_queue_stop	      = atl_tx_queue_stop,
313 	.tx_queue_setup       = atl_tx_queue_setup,
314 	.tx_queue_release     = atl_tx_queue_release,
315 
316 	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
317 	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
318 
319 	.rx_queue_count       = atl_rx_queue_count,
320 	.rx_descriptor_status = atl_dev_rx_descriptor_status,
321 	.tx_descriptor_status = atl_dev_tx_descriptor_status,
322 
323 	/* EEPROM */
324 	.get_eeprom_length    = atl_dev_get_eeprom_length,
325 	.get_eeprom           = atl_dev_get_eeprom,
326 	.set_eeprom           = atl_dev_set_eeprom,
327 
328 	/* Flow Control */
329 	.flow_ctrl_get	      = atl_flow_ctrl_get,
330 	.flow_ctrl_set	      = atl_flow_ctrl_set,
331 
332 	/* MAC */
333 	.mac_addr_add	      = atl_add_mac_addr,
334 	.mac_addr_remove      = atl_remove_mac_addr,
335 	.mac_addr_set	      = atl_set_default_mac_addr,
336 	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
337 	.rxq_info_get	      = atl_rxq_info_get,
338 	.txq_info_get	      = atl_txq_info_get,
339 
340 	.reta_update          = atl_reta_update,
341 	.reta_query           = atl_reta_query,
342 	.rss_hash_update      = atl_rss_hash_update,
343 	.rss_hash_conf_get    = atl_rss_hash_conf_get,
344 };
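/*
 * Note (editorial): these ops are dispatched by the ethdev layer, not
 * called directly.  A hypothetical application start-up such as
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &dev_conf);
 *	...queue setup...
 *	rte_eth_dev_start(port_id);
 *
 * reaches atl_dev_configure() and atl_dev_start() through this table.
 */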
345 
346 static inline int32_t
347 atl_reset_hw(struct aq_hw_s *hw)
348 {
349 	return hw_atl_b0_hw_reset(hw);
350 }
351 
352 static inline void
353 atl_enable_intr(struct rte_eth_dev *dev)
354 {
355 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
356 
357 	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
358 }
359 
360 static void
361 atl_disable_intr(struct aq_hw_s *hw)
362 {
363 	PMD_INIT_FUNC_TRACE();
364 	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
365 }
366 
367 static int
368 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
369 {
370 	struct atl_adapter *adapter = eth_dev->data->dev_private;
371 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
372 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
373 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
374 	int err = 0;
375 
376 	PMD_INIT_FUNC_TRACE();
377 
378 	eth_dev->dev_ops = &atl_eth_dev_ops;
379 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
380 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
381 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
382 
383 	/* For secondary processes, the primary process has done all the work */
384 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
385 		return 0;
386 
387 	/* Vendor and Device ID need to be set before init of shared code */
388 	hw->device_id = pci_dev->id.device_id;
389 	hw->vendor_id = pci_dev->id.vendor_id;
390 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
391 
392 	/* Hardware configuration - hardcode */
393 	adapter->hw_cfg.is_lro = false;
394 	adapter->hw_cfg.wol = false;
395 	adapter->hw_cfg.is_rss = false;
396 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
397 
398 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
399 			  AQ_NIC_RATE_5G |
400 			  AQ_NIC_RATE_2G5 |
401 			  AQ_NIC_RATE_1G |
402 			  AQ_NIC_RATE_100M;
403 
404 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
405 	adapter->hw_cfg.aq_rss.indirection_table_size =
406 		HW_ATL_B0_RSS_REDIRECTION_MAX;
407 
408 	hw->aq_nic_cfg = &adapter->hw_cfg;
409 
410 	/* disable interrupt */
411 	atl_disable_intr(hw);
412 
413 	/* Allocate memory for storing MAC addresses */
414 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
415 					RTE_ETHER_ADDR_LEN, 0);
416 	if (eth_dev->data->mac_addrs == NULL) {
417 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
418 		return -ENOMEM;
419 	}
420 
421 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
422 	if (err)
423 		return err;
424 
425 	/* Copy the permanent MAC address */
426 	if (hw->aq_fw_ops->get_mac_permanent(hw,
427 			eth_dev->data->mac_addrs->addr_bytes) != 0)
428 		return -EINVAL;
429 
430 	/* Reset the hw statistics */
431 	atl_dev_stats_reset(eth_dev);
432 
433 	rte_intr_callback_register(intr_handle,
434 				   atl_dev_interrupt_handler, eth_dev);
435 
436 	/* enable uio/vfio intr/eventfd mapping */
437 	rte_intr_enable(intr_handle);
438 
439 	/* enable support intr */
440 	atl_enable_intr(eth_dev);
441 
442 	return err;
443 }
444 
445 static int
446 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
447 {
448 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
449 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
450 	struct aq_hw_s *hw;
451 
452 	PMD_INIT_FUNC_TRACE();
453 
454 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
455 		return -EPERM;
456 
457 	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
458 
459 	if (hw->adapter_stopped == 0)
460 		atl_dev_close(eth_dev);
461 
462 	eth_dev->dev_ops = NULL;
463 	eth_dev->rx_pkt_burst = NULL;
464 	eth_dev->tx_pkt_burst = NULL;
465 
466 	/* disable uio intr before callback unregister */
467 	rte_intr_disable(intr_handle);
468 	rte_intr_callback_unregister(intr_handle,
469 				     atl_dev_interrupt_handler, eth_dev);
470 
471 	rte_free(eth_dev->data->mac_addrs);
472 	eth_dev->data->mac_addrs = NULL;
473 
474 	return 0;
475 }
476 
477 static int
478 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
479 	struct rte_pci_device *pci_dev)
480 {
481 	return rte_eth_dev_pci_generic_probe(pci_dev,
482 		sizeof(struct atl_adapter), eth_atl_dev_init);
483 }
484 
485 static int
486 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
487 {
488 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
489 }
490 
491 static int
492 atl_dev_configure(struct rte_eth_dev *dev)
493 {
494 	struct atl_interrupt *intr =
495 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
496 
497 	PMD_INIT_FUNC_TRACE();
498 
499 	/* set flag to update link status after init */
500 	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
501 
502 	return 0;
503 }
504 
505 /*
506  * Configure device link speed and setup link.
507  * It returns 0 on success.
508  */
509 static int
510 atl_dev_start(struct rte_eth_dev *dev)
511 {
512 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
513 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
514 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
515 	uint32_t intr_vector = 0;
516 	int status;
517 	int err;
518 
519 	PMD_INIT_FUNC_TRACE();
520 
521 	/* set adapter started */
522 	hw->adapter_stopped = 0;
523 
524 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
525 		PMD_INIT_LOG(ERR,
526 		"Invalid link_speeds for port %u, fix speed not supported",
527 				dev->data->port_id);
528 		return -EINVAL;
529 	}
530 
531 	/* disable uio/vfio intr/eventfd mapping */
532 	rte_intr_disable(intr_handle);
533 
534 	/* Reinitialize the adapter:
535 	 * reset the hardware, then start it again.
536 	 */
537 	status = atl_reset_hw(hw);
538 	if (status != 0)
539 		return -EIO;
540 
541 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
542 	if (err != 0)
543 		return -EIO;
543 	hw_atl_b0_hw_start(hw);
544 	/* check and configure queue intr-vector mapping */
545 	if ((rte_intr_cap_multiple(intr_handle) ||
546 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
547 	    dev->data->dev_conf.intr_conf.rxq != 0) {
548 		intr_vector = dev->data->nb_rx_queues;
549 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
550 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
551 					ATL_MAX_INTR_QUEUE_NUM);
552 			return -ENOTSUP;
553 		}
554 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
555 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
556 			return -1;
557 		}
558 	}
559 
560 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
561 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
562 				    dev->data->nb_rx_queues * sizeof(int), 0);
563 		if (intr_handle->intr_vec == NULL) {
564 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
565 				     " intr_vec", dev->data->nb_rx_queues);
566 			return -ENOMEM;
567 		}
568 	}
569 
570 	/* initialize transmission unit */
571 	atl_tx_init(dev);
572 
573 	/* This can fail when allocating mbufs for descriptor rings */
574 	err = atl_rx_init(dev);
575 	if (err) {
576 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
577 		goto error;
578 	}
579 
580 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
581 		hw->fw_ver_actual >> 24,
582 		(hw->fw_ver_actual >> 16) & 0xFF,
583 		hw->fw_ver_actual & 0xFFFF);
584 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
585 
586 	err = atl_start_queues(dev);
587 	if (err < 0) {
588 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
589 		goto error;
590 	}
591 
592 	err = atl_dev_set_link_up(dev);
593 	if (err == 0)
594 		err = hw->aq_fw_ops->update_link_status(hw);
595 
596 	if (err)
597 		goto error;
598 
599 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
600 
601 	if (rte_intr_allow_others(intr_handle)) {
602 		/* check if lsc interrupt is enabled */
603 		if (dev->data->dev_conf.intr_conf.lsc != 0)
604 			atl_dev_lsc_interrupt_setup(dev, true);
605 		else
606 			atl_dev_lsc_interrupt_setup(dev, false);
607 	} else {
608 		rte_intr_callback_unregister(intr_handle,
609 					     atl_dev_interrupt_handler, dev);
610 		if (dev->data->dev_conf.intr_conf.lsc != 0)
611 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
612 				     " no intr multiplex");
613 	}
614 
615 	/* check if rxq interrupt is enabled */
616 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
617 	    rte_intr_dp_is_en(intr_handle))
618 		atl_dev_rxq_interrupt_setup(dev);
619 
620 	/* enable uio/vfio intr/eventfd mapping */
621 	rte_intr_enable(intr_handle);
622 
623 	/* resume enabled intr since hw reset */
624 	atl_enable_intr(dev);
625 
626 	return 0;
627 
628 error:
629 	atl_stop_queues(dev);
630 	return -EIO;
631 }
632 
633 /*
634  * Stop device: disable rx and tx functions to allow for reconfiguring.
635  */
636 static void
637 atl_dev_stop(struct rte_eth_dev *dev)
638 {
639 	struct rte_eth_link link;
640 	struct aq_hw_s *hw =
641 		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
642 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
643 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
644 
645 	PMD_INIT_FUNC_TRACE();
646 
647 	/* disable interrupts */
648 	atl_disable_intr(hw);
649 
650 	/* reset the NIC */
651 	atl_reset_hw(hw);
652 	hw->adapter_stopped = 1;
653 
654 	atl_stop_queues(dev);
655 
656 	/* Clear stored conf */
657 	dev->data->scattered_rx = 0;
658 	dev->data->lro = 0;
659 
660 	/* Clear recorded link status */
661 	memset(&link, 0, sizeof(link));
662 	rte_eth_linkstatus_set(dev, &link);
663 
664 	if (!rte_intr_allow_others(intr_handle))
665 		/* resume to the default handler */
666 		rte_intr_callback_register(intr_handle,
667 					   atl_dev_interrupt_handler,
668 					   (void *)dev);
669 
670 	/* Clean datapath event and queue/vec mapping */
671 	rte_intr_efd_disable(intr_handle);
672 	if (intr_handle->intr_vec != NULL) {
673 		rte_free(intr_handle->intr_vec);
674 		intr_handle->intr_vec = NULL;
675 	}
676 }
677 
678 /*
679  * Set device link up: enable tx.
680  */
681 static int
682 atl_dev_set_link_up(struct rte_eth_dev *dev)
683 {
684 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
685 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
686 	uint32_t speed_mask = 0;
687 
688 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
689 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
690 	} else {
691 		if (link_speeds & ETH_LINK_SPEED_10G)
692 			speed_mask |= AQ_NIC_RATE_10G;
693 		if (link_speeds & ETH_LINK_SPEED_5G)
694 			speed_mask |= AQ_NIC_RATE_5G;
695 		if (link_speeds & ETH_LINK_SPEED_1G)
696 			speed_mask |= AQ_NIC_RATE_1G;
697 		if (link_speeds & ETH_LINK_SPEED_2_5G)
698 			speed_mask |=  AQ_NIC_RATE_2G5;
699 		if (link_speeds & ETH_LINK_SPEED_100M)
700 			speed_mask |= AQ_NIC_RATE_100M;
701 	}
702 
703 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
704 }
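/*
 * Example (illustrative values): an application that set
 * dev_conf.link_speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G gets
 * AQ_NIC_RATE_1G | AQ_NIC_RATE_10G requested from the firmware, while
 * ETH_LINK_SPEED_AUTONEG (0) advertises the whole link_speed_msk.
 */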
705 
706 /*
707  * Set device link down: disable tx.
708  */
709 static int
710 atl_dev_set_link_down(struct rte_eth_dev *dev)
711 {
712 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
713 
714 	return hw->aq_fw_ops->set_link_speed(hw, 0);
715 }
716 
717 /*
718  * Reset and stop device.
719  */
720 static void
721 atl_dev_close(struct rte_eth_dev *dev)
722 {
723 	PMD_INIT_FUNC_TRACE();
724 
725 	atl_dev_stop(dev);
726 
727 	atl_free_queues(dev);
728 }
729 
730 static int
731 atl_dev_reset(struct rte_eth_dev *dev)
732 {
733 	int ret;
734 
735 	ret = eth_atl_dev_uninit(dev);
736 	if (ret)
737 		return ret;
738 
739 	ret = eth_atl_dev_init(dev);
740 
741 	return ret;
742 }
743 
744 static int
745 atl_dev_configure_macsec(struct rte_eth_dev *dev)
746 {
747 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
748 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
749 	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
750 	struct macsec_msg_fw_request msg_macsec;
751 	struct macsec_msg_fw_response response;
752 
753 	if (!aqcfg->common.macsec_enabled ||
754 	    hw->aq_fw_ops->send_macsec_req == NULL)
755 		return 0;
756 
757 	memset(&msg_macsec, 0, sizeof(msg_macsec));
758 
759 	/* Create the SC/SA structures from the parameters provided by DPDK */
760 
761 	/* Configure macsec */
762 	msg_macsec.msg_type = macsec_cfg_msg;
763 	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
764 	msg_macsec.cfg.interrupts_enabled = 1;
765 
766 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
767 
768 	if (response.result)
769 		return -1;
770 
771 	memset(&msg_macsec, 0, sizeof(msg_macsec));
772 
773 	/* Configure TX SC */
774 
775 	msg_macsec.msg_type = macsec_add_tx_sc_msg;
776 	msg_macsec.txsc.index = 0; /* only a single TX SC is used */
777 	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
778 
779 	/* MAC addr for TX */
780 	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
781 	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
782 	msg_macsec.txsc.sa_mask = 0x3f;
783 
784 	msg_macsec.txsc.da_mask = 0;
785 	msg_macsec.txsc.tci = 0x0B;
786 	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */
787 
788 	/*
789 	 * Create the SCI (Secure Channel Identifier).
790 	 * The SCI is built from the source MAC address and a port identifier.
791 	 */
792 	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
793 			       (msg_macsec.txsc.mac_sa[0] >> 16);
794 	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
795 
796 	uint32_t port_identifier = 1;
797 
798 	msg_macsec.txsc.sci[1] = sci_hi_part;
799 	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
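	/*
	 * Illustration (per IEEE 802.1AE, not taken from this code):
	 * the 64-bit SCI is the 48-bit source MAC address followed by
	 * a 16-bit port identifier, so MAC 00:17:b6:00:00:01 with port
	 * 1 is intended to yield SCI 0x0017b60000010001, stored here
	 * split across sci[1]/sci[0].
	 */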
800 
801 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
802 
803 	if (response.result)
804 		return -1;
805 
806 	memset(&msg_macsec, 0, sizeof(msg_macsec));
807 
808 	/* Configure RX SC */
809 
810 	msg_macsec.msg_type = macsec_add_rx_sc_msg;
811 	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
812 	msg_macsec.rxsc.replay_protect =
813 		aqcfg->common.replay_protection_enabled;
814 	msg_macsec.rxsc.anti_replay_window = 0;
815 
816 	/* MAC addr for RX */
817 	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
818 	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
819 	msg_macsec.rxsc.da_mask = 0;
820 
821 	msg_macsec.rxsc.sa_mask = 0;
822 
823 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
824 
825 	if (response.result)
826 		return -1;
827 
828 	memset(&msg_macsec, 0, sizeof(msg_macsec));
829 
830 	/* Configure TX SA */
831 
832 	msg_macsec.msg_type = macsec_add_tx_sa_msg;
833 	msg_macsec.txsa.index = aqcfg->txsa.idx;
834 	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
835 
836 	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
837 	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
838 	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
839 	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
840 
841 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
842 
843 	if (response.result)
844 		return -1;
845 
846 	memset(&msg_macsec, 0, sizeof(msg_macsec));
847 
848 	/* Configure RX SA */
849 
850 	msg_macsec.msg_type = macsec_add_rx_sa_msg;
851 	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
852 	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
853 
854 	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
855 	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
856 	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
857 	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
858 
859 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
860 
861 	if (response.result)
862 		return -1;
863 
864 	return 0;
865 }
866 
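/*
 * The atl_macsec_* helpers below only stage MACsec parameters in the
 * per-port configuration; nothing reaches the hardware until
 * atl_dev_configure_macsec() runs from the link-update alarm (see
 * atl_dev_delayed_handler()).  A plausible call order for a control
 * application is therefore:
 *
 *	atl_macsec_enable(dev, encr, repl_prot);
 *	atl_macsec_config_txsc(dev, local_mac);
 *	atl_macsec_config_rxsc(dev, peer_mac, pi);
 *	atl_macsec_select_txsa(dev, idx, an, pn, key);
 *	atl_macsec_select_rxsa(dev, idx, an, pn, key);
 */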
867 int atl_macsec_enable(struct rte_eth_dev *dev,
868 		      uint8_t encr, uint8_t repl_prot)
869 {
870 	struct aq_hw_cfg_s *cfg =
871 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
872 
873 	cfg->aq_macsec.common.macsec_enabled = 1;
874 	cfg->aq_macsec.common.encryption_enabled = encr;
875 	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
876 
877 	return 0;
878 }
879 
880 int atl_macsec_disable(struct rte_eth_dev *dev)
881 {
882 	struct aq_hw_cfg_s *cfg =
883 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
884 
885 	cfg->aq_macsec.common.macsec_enabled = 0;
886 
887 	return 0;
888 }
889 
890 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
891 {
892 	struct aq_hw_cfg_s *cfg =
893 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
894 
895 	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
896 	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
897 		RTE_ETHER_ADDR_LEN);
898 
899 	return 0;
900 }
901 
902 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
903 			   uint8_t *mac, uint16_t pi)
904 {
905 	struct aq_hw_cfg_s *cfg =
906 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
907 
908 	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
909 	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
910 		RTE_ETHER_ADDR_LEN);
911 	cfg->aq_macsec.rxsc.pi = pi;
912 
913 	return 0;
914 }
915 
916 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
917 			   uint8_t idx, uint8_t an,
918 			   uint32_t pn, uint8_t *key)
919 {
920 	struct aq_hw_cfg_s *cfg =
921 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
922 
923 	cfg->aq_macsec.txsa.idx = idx;
924 	cfg->aq_macsec.txsa.pn = pn;
925 	cfg->aq_macsec.txsa.an = an;
926 
927 	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
928 	return 0;
929 }
930 
931 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
932 			   uint8_t idx, uint8_t an,
933 			   uint32_t pn, uint8_t *key)
934 {
935 	struct aq_hw_cfg_s *cfg =
936 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
937 
938 	cfg->aq_macsec.rxsa.idx = idx;
939 	cfg->aq_macsec.rxsa.pn = pn;
940 	cfg->aq_macsec.rxsa.an = an;
941 
942 	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
943 	return 0;
944 }
945 
946 static int
947 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
948 {
949 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
950 	struct aq_hw_s *hw = &adapter->hw;
951 	struct atl_sw_stats *swstats = &adapter->sw_stats;
952 	unsigned int i;
953 
954 	hw->aq_fw_ops->update_stats(hw);
955 
956 	/* Fill out the rte_eth_stats statistics structure */
957 	stats->ipackets = hw->curr_stats.dma_pkt_rc;
958 	stats->ibytes = hw->curr_stats.dma_oct_rc;
959 	stats->imissed = hw->curr_stats.dpc;
960 	stats->ierrors = hw->curr_stats.erpt;
961 
962 	stats->opackets = hw->curr_stats.dma_pkt_tc;
963 	stats->obytes = hw->curr_stats.dma_oct_tc;
964 	stats->oerrors = 0;
965 
966 	stats->rx_nombuf = swstats->rx_nombuf;
967 
968 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
969 		stats->q_ipackets[i] = swstats->q_ipackets[i];
970 		stats->q_opackets[i] = swstats->q_opackets[i];
971 		stats->q_ibytes[i] = swstats->q_ibytes[i];
972 		stats->q_obytes[i] = swstats->q_obytes[i];
973 		stats->q_errors[i] = swstats->q_errors[i];
974 	}
975 	return 0;
976 }
977 
978 static void
979 atl_dev_stats_reset(struct rte_eth_dev *dev)
980 {
981 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
982 	struct aq_hw_s *hw = &adapter->hw;
983 
984 	hw->aq_fw_ops->update_stats(hw);
985 
986 	/* Reset software totals */
987 	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
988 
989 	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
990 }
991 
992 static int
993 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
994 			 struct rte_eth_xstat_name *xstats_names,
995 			 unsigned int size)
996 {
997 	unsigned int i;
998 
999 	if (!xstats_names)
1000 		return RTE_DIM(atl_xstats_tbl);
1001 
1002 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
1003 		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
1004 			RTE_ETH_XSTATS_NAME_SIZE);
1005 
1006 	return i;
1007 }
1008 
1009 static int
1010 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1011 		   unsigned int n)
1012 {
1013 	struct atl_adapter *adapter = dev->data->dev_private;
1014 	struct aq_hw_s *hw = &adapter->hw;
1015 	struct get_stats req = { 0 };
1016 	struct macsec_msg_fw_request msg = { 0 };
1017 	struct macsec_msg_fw_response resp = { 0 };
1018 	int err = -1;
1019 	unsigned int i;
1020 
1021 	if (!stats)
1022 		return 0;
1023 
1024 	if (hw->aq_fw_ops->send_macsec_req != NULL) {
1025 		req.ingress_sa_index = 0xff;
1026 		req.egress_sc_index = 0xff;
1027 		req.egress_sa_index = 0xff;
1028 
1029 		msg.msg_type = macsec_get_stats_msg;
1030 		msg.stats = req;
1031 
1032 		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1033 	}
1034 
1035 	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
1036 		stats[i].id = i;
1037 
1038 		switch (atl_xstats_tbl[i].type) {
1039 		case XSTATS_TYPE_MSM:
1040 			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
1041 					 atl_xstats_tbl[i].offset);
1042 			break;
1043 		case XSTATS_TYPE_MACSEC:
1044 			if (err)
1045 				goto done;
1046 			stats[i].value = *(u64 *)((uint8_t *)&resp.stats +
1047 					 atl_xstats_tbl[i].offset);
1048 			break;
1049 		}
1050 	}
1051 done:
1052 	return i;
1053 }
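/*
 * Usage sketch (hypothetical application code): xstats follow the
 * usual two-step ethdev protocol, which maps onto the two callbacks
 * above:
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	rte_eth_xstats_get(port_id, xs, n);
 *
 * MACsec counters are filled in only when the firmware answers the
 * macsec_get_stats_msg request.
 */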
1054 
1055 static int
1056 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1057 {
1058 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1059 	uint32_t fw_ver = 0;
1060 	unsigned int ret = 0;
1061 
1062 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1063 	if (ret)
1064 		return -EIO;
1065 
1066 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1067 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1068 
1069 	ret += 1; /* account for the terminating null byte */
1070 
1071 	if (fw_size < ret)
1072 		return ret;
1073 
1074 	return 0;
1075 }
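/*
 * Worked example: the firmware packs its version as
 * major(8) | minor(8) | build(16), so fw_ver = 0x03010203 is printed
 * by the snprintf() above as "3.1.515" (0x0203 == 515).
 */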
1076 
1077 static int
1078 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1079 {
1080 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1081 
1082 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1083 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1084 
1085 	dev_info->min_rx_bufsize = 1024;
1086 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1087 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1088 	dev_info->max_vfs = pci_dev->max_vfs;
1089 
1090 	dev_info->max_hash_mac_addrs = 0;
1091 	dev_info->max_vmdq_pools = 0;
1092 	dev_info->vmdq_queue_num = 0;
1093 
1094 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1095 
1096 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1097 
1099 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1100 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1101 	};
1102 
1103 	dev_info->default_txconf = (struct rte_eth_txconf) {
1104 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1105 	};
1106 
1107 	dev_info->rx_desc_lim = rx_desc_lim;
1108 	dev_info->tx_desc_lim = tx_desc_lim;
1109 
1110 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1111 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1112 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1113 
1114 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1115 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1116 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1117 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
1118 
1119 	return 0;
1120 }
1121 
1122 static const uint32_t *
1123 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1124 {
1125 	static const uint32_t ptypes[] = {
1126 		RTE_PTYPE_L2_ETHER,
1127 		RTE_PTYPE_L2_ETHER_ARP,
1128 		RTE_PTYPE_L2_ETHER_VLAN,
1129 		RTE_PTYPE_L3_IPV4,
1130 		RTE_PTYPE_L3_IPV6,
1131 		RTE_PTYPE_L4_TCP,
1132 		RTE_PTYPE_L4_UDP,
1133 		RTE_PTYPE_L4_SCTP,
1134 		RTE_PTYPE_L4_ICMP,
1135 		RTE_PTYPE_UNKNOWN
1136 	};
1137 
1138 	if (dev->rx_pkt_burst == atl_recv_pkts)
1139 		return ptypes;
1140 
1141 	return NULL;
1142 }
1143 
1144 static void
1145 atl_dev_delayed_handler(void *param)
1146 {
1147 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1148 
1149 	atl_dev_configure_macsec(dev);
1150 }
1151 
1152 
1153 /* Return 0 if the link status changed, -1 if it did not. */
1154 static int
1155 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
1156 {
1157 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1158 	struct rte_eth_link link, old;
1159 	u32 fc = AQ_NIC_FC_OFF;
1160 	int err = 0;
1161 
1162 	link.link_status = ETH_LINK_DOWN;
1163 	link.link_speed = 0;
1164 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1165 	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1166 	memset(&old, 0, sizeof(old));
1167 
1168 	/* load old link status */
1169 	rte_eth_linkstatus_get(dev, &old);
1170 
1171 	/* read current link status */
1172 	err = hw->aq_fw_ops->update_link_status(hw);
1173 
1174 	if (err)
1175 		return 0;
1176 
1177 	if (hw->aq_link_status.mbps == 0) {
1178 		/* write default (down) link status */
1179 		rte_eth_linkstatus_set(dev, &link);
1180 		if (link.link_status == old.link_status)
1181 			return -1;
1182 		return 0;
1183 	}
1184 
1185 	link.link_status = ETH_LINK_UP;
1186 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1187 	link.link_speed = hw->aq_link_status.mbps;
1188 
1189 	rte_eth_linkstatus_set(dev, &link);
1190 
1191 	if (link.link_status == old.link_status)
1192 		return -1;
1193 
1194 	/* Driver has to update flow control settings on RX block
1195 	 * on any link event.
1196 	 * We should query FW whether it negotiated FC.
1197 	 */
1198 	if (hw->aq_fw_ops->get_flow_control) {
1199 		hw->aq_fw_ops->get_flow_control(hw, &fc);
1200 		hw_atl_b0_set_fc(hw, fc, 0U);
1201 	}
1202 
1203 	if (rte_eal_alarm_set(1000 * 1000,
1204 			      atl_dev_delayed_handler, (void *)dev) < 0)
1205 		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");
1206 
1207 	return 0;
1208 }
1209 
1210 static void
1211 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1212 {
1213 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1214 
1215 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1216 }
1217 
1218 static void
1219 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1220 {
1221 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1222 
1223 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1224 }
1225 
1226 static void
1227 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1228 {
1229 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1230 
1231 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1232 }
1233 
1234 static void
1235 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1236 {
1237 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1238 
1239 	if (dev->data->promiscuous == 1)
1240 		return; /* must remain in all_multicast mode */
1241 
1242 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1243 }
1244 
1245 /**
1246  * It clears the interrupt causes and enables the interrupt.
1247  * It is called only once, during NIC initialization.
1248  *
1249  * @param dev
1250  *  Pointer to struct rte_eth_dev.
1251  * @param on
1252  *  Enable or Disable.
1253  *
1254  * @return
1255  *  - On success, zero.
1256  *  - On failure, a negative value.
1257  */
1258 
1259 static int
1260 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
1261 {
1262 	atl_dev_link_status_print(dev);
1263 	return 0;
1264 }
1265 
1266 static int
1267 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
1268 {
1269 	return 0;
1270 }
1271 
1272 
1273 static int
1274 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1275 {
1276 	struct atl_interrupt *intr =
1277 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1278 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1279 	u64 cause = 0;
1280 
1281 	hw_atl_b0_hw_irq_read(hw, &cause);
1282 
1283 	atl_disable_intr(hw);
1284 
1285 	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
1286 		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
1287 
1288 	return 0;
1289 }
1290 
1291 /**
1292  * It gets and then prints the link status.
1293  *
1294  * @param dev
1295  *  Pointer to struct rte_eth_dev.
1300  */
1301 static void
1302 atl_dev_link_status_print(struct rte_eth_dev *dev)
1303 {
1304 	struct rte_eth_link link;
1305 
1306 	memset(&link, 0, sizeof(link));
1307 	rte_eth_linkstatus_get(dev, &link);
1308 	if (link.link_status) {
1309 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1310 					(int)(dev->data->port_id),
1311 					(unsigned int)link.link_speed,
1312 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1313 					"full-duplex" : "half-duplex");
1314 	} else {
1315 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
1316 				(int)(dev->data->port_id));
1317 	}
1318 
1320 #ifdef DEBUG
1321 {
1322 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1323 
1324 	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1325 				pci_dev->addr.domain,
1326 				pci_dev->addr.bus,
1327 				pci_dev->addr.devid,
1328 				pci_dev->addr.function);
1329 }
1330 #endif
1331 
1332 	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1333 }
1334 
1335 /*
1336  * It executes link_update after an interrupt has occurred.
1337  *
1338  * @param dev
1339  *  Pointer to struct rte_eth_dev.
1340  *
1341  * @return
1342  *  - On success, zero.
1343  *  - On failure, a negative value.
1344  */
1345 static int
1346 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1347 			   struct rte_intr_handle *intr_handle)
1348 {
1349 	struct atl_interrupt *intr =
1350 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1351 	struct atl_adapter *adapter = dev->data->dev_private;
1352 	struct aq_hw_s *hw = &adapter->hw;
1353 
1354 	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
1355 		goto done;
1356 
1357 	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1358 
1359 	/* Notify userapp if link status changed */
1360 	if (!atl_dev_link_update(dev, 0)) {
1361 		atl_dev_link_status_print(dev);
1362 		_rte_eth_dev_callback_process(dev,
1363 			RTE_ETH_EVENT_INTR_LSC, NULL);
1364 	} else {
1365 		if (hw->aq_fw_ops->send_macsec_req == NULL)
1366 			goto done;
1367 
1368 		/* Check macsec Keys expired */
1369 		struct get_stats req = { 0 };
1370 		struct macsec_msg_fw_request msg = { 0 };
1371 		struct macsec_msg_fw_response resp = { 0 };
1372 
1373 		req.ingress_sa_index = 0x0;
1374 		req.egress_sc_index = 0x0;
1375 		req.egress_sa_index = 0x0;
1376 		msg.msg_type = macsec_get_stats_msg;
1377 		msg.stats = req;
1378 
1379 		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1380 		if (err) {
1381 			PMD_DRV_LOG(ERR, "send_macsec_req fail");
1382 			goto done;
1383 		}
1384 		if (resp.stats.egress_threshold_expired ||
1385 		    resp.stats.ingress_threshold_expired ||
1386 		    resp.stats.egress_expired ||
1387 		    resp.stats.ingress_expired) {
1388 			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
1389 			_rte_eth_dev_callback_process(dev,
1390 				RTE_ETH_EVENT_MACSEC, NULL);
1391 		}
1392 	}
1393 done:
1394 	atl_enable_intr(dev);
1395 	rte_intr_ack(intr_handle);
1396 
1397 	return 0;
1398 }
1399 
1400 /**
1401  * Interrupt handler triggered by the NIC to handle a
1402  * specific interrupt.
1403  *
1404  * @param handle
1405  *  Pointer to interrupt handle.
1406  * @param param
1407  *  The address of the parameter (struct rte_eth_dev *) registered before.
1408  *
1409  * @return
1410  *  void
1411  */
1412 static void
1413 atl_dev_interrupt_handler(void *param)
1414 {
1415 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1416 
1417 	atl_dev_interrupt_get_status(dev);
1418 	atl_dev_interrupt_action(dev, dev->intr_handle);
1419 }
1420 
1421 
1422 static int
1423 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1424 {
1425 	return SFP_EEPROM_SIZE;
1426 }
1427 
1428 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1429 		       struct rte_dev_eeprom_info *eeprom)
1430 {
1431 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1432 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1433 
1434 	if (hw->aq_fw_ops->get_eeprom == NULL)
1435 		return -ENOTSUP;
1436 
1437 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1438 	    eeprom->data == NULL)
1439 		return -EINVAL;
1440 
1441 	if (eeprom->magic > 0x7F)
1442 		return -EINVAL;
1443 
1444 	if (eeprom->magic)
1445 		dev_addr = eeprom->magic;
1446 
1447 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1448 					 eeprom->length, eeprom->offset);
1449 }
1450 
1451 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1452 		       struct rte_dev_eeprom_info *eeprom)
1453 {
1454 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1455 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1456 
1457 	if (hw->aq_fw_ops->set_eeprom == NULL)
1458 		return -ENOTSUP;
1459 
1460 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1461 	    eeprom->data == NULL)
1462 		return -EINVAL;
1463 
1464 	if (eeprom->magic > 0x7F)
1465 		return -EINVAL;
1466 
1467 	if (eeprom->magic)
1468 		dev_addr = eeprom->magic;
1469 
1470 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1471 					 eeprom->length, eeprom->offset);
1472 }
1473 
1474 static int
1475 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1476 {
1477 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1478 	u32 mif_id;
1479 	int err;
1480 
1481 	if (regs->data == NULL) {
1482 		regs->length = hw_atl_utils_hw_get_reg_length();
1483 		regs->width = sizeof(u32);
1484 		return 0;
1485 	}
1486 
1487 	/* Only full register dump is supported */
1488 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1489 		return -ENOTSUP;
1490 
1491 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1492 
1493 	/* Device version */
1494 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1495 	regs->version = mif_id & 0xFFU;
1496 
1497 	return err;
1498 }
1499 
1500 static int
1501 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1502 {
1503 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1504 	u32 fc = AQ_NIC_FC_OFF;
1505 
1506 	if (hw->aq_fw_ops->get_flow_control == NULL)
1507 		return -ENOTSUP;
1508 
1509 	hw->aq_fw_ops->get_flow_control(hw, &fc);
1510 
1511 	if (fc == AQ_NIC_FC_OFF)
1512 		fc_conf->mode = RTE_FC_NONE;
1513 	else if (fc & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1514 		fc_conf->mode = RTE_FC_FULL;
1515 	else if (fc & AQ_NIC_FC_RX)
1516 		fc_conf->mode = RTE_FC_RX_PAUSE;
1517 	else if (fc & AQ_NIC_FC_RX)
1518 		fc_conf->mode = RTE_FC_TX_PAUSE;
1519 
1520 	return 0;
1521 }
1522 
1523 static int
1524 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1525 {
1526 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1527 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1528 
1529 
1530 	if (hw->aq_fw_ops->set_flow_control == NULL)
1531 		return -ENOTSUP;
1532 
1533 	if (fc_conf->mode == RTE_FC_NONE)
1534 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1535 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1536 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1537 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1538 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1539 	else if (fc_conf->mode == RTE_FC_FULL)
1540 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1541 
1542 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1543 		return hw->aq_fw_ops->set_flow_control(hw);
1544 
1545 	return 0;
1546 }
1547 
1548 static int
1549 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1550 		    u8 *mac_addr, bool enable)
1551 {
1552 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1553 	unsigned int h = 0U;
1554 	unsigned int l = 0U;
1555 	int err;
1556 
1557 	if (mac_addr) {
1558 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1559 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1560 			(mac_addr[4] << 8) | mac_addr[5];
1561 	}
1562 
1563 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1564 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1565 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1566 
1567 	if (enable)
1568 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1569 
1570 	err = aq_hw_err_from_flags(hw);
1571 
1572 	return err;
1573 }
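/*
 * Example: for MAC 00:11:22:33:44:55 the code above programs
 * h = 0x0011 (two most significant bytes) and l = 0x22334455 (four
 * least significant bytes) into the L2 unicast filter at 'index'.
 */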
1574 
1575 static int
1576 atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1577 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1578 {
1579 	if (rte_is_zero_ether_addr(mac_addr)) {
1580 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1581 		return -EINVAL;
1582 	}
1583 
1584 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1585 }
1586 
1587 static void
1588 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1589 {
1590 	atl_update_mac_addr(dev, index, NULL, false);
1591 }
1592 
1593 static int
1594 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
1595 {
1596 	atl_remove_mac_addr(dev, 0);
1597 	atl_add_mac_addr(dev, addr, 0, 0);
1598 	return 0;
1599 }
1600 
1601 static int
1602 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1603 {
1604 	struct rte_eth_dev_info dev_info;
1605 	int ret;
1606 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1607 
1608 	ret = atl_dev_info_get(dev, &dev_info);
1609 	if (ret != 0)
1610 		return ret;
1611 
1612 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1613 		return -EINVAL;
1614 
1615 	/* update max frame size */
1616 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1617 
1618 	return 0;
1619 }
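/*
 * Example: a request for mtu = 1500 gives a frame size of
 * 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 1518 bytes,
 * which is validated against max_rx_pktlen before being stored in
 * rxmode.max_rx_pkt_len.
 */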
1620 
1621 static int
1622 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1623 {
1624 	struct aq_hw_cfg_s *cfg =
1625 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1626 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1627 	int err = 0;
1628 	int i = 0;
1629 
1630 	PMD_INIT_FUNC_TRACE();
1631 
1632 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1633 		if (cfg->vlan_filter[i] == vlan_id) {
1634 			if (!on) {
1635 				/* Disable VLAN filter. */
1636 				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1637 
1638 				/* Clear VLAN filter entry */
1639 				cfg->vlan_filter[i] = 0;
1640 			}
1641 			break;
1642 		}
1643 	}
1644 
1645 	/* VLAN_ID was not found, so there is nothing to delete. */
1646 	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1647 		goto exit;
1648 
1649 	/* VLAN_ID already exists, or was removed above. Nothing to do. */
1650 	if (i != HW_ATL_B0_MAX_VLAN_IDS)
1651 		goto exit;
1652 
1653 	/* Try to find a free VLAN filter entry for the new VLAN_ID */
1654 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1655 		if (cfg->vlan_filter[i] == 0)
1656 			break;
1657 	}
1658 
1659 	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1660 		/* We have no free VLAN filter entry for the new VLAN_ID */
1661 		err = -ENOMEM;
1662 		goto exit;
1663 	}
1664 
1665 	cfg->vlan_filter[i] = vlan_id;
1666 	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1667 	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1668 	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1669 
1670 exit:
1671 	/* Enable VLAN promiscuous mode if the VLAN filter table is empty */
1672 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1673 		if (cfg->vlan_filter[i] != 0)
1674 			break;
1675 	}
1676 
1677 	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1678 
1679 	return err;
1680 }
1681 
1682 static int
1683 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1684 {
1685 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1686 	struct aq_hw_cfg_s *cfg =
1687 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1688 	int i;
1689 
1690 	PMD_INIT_FUNC_TRACE();
1691 
1692 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1693 		if (cfg->vlan_filter[i])
1694 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1695 	}
1696 	return 0;
1697 }
1698 
1699 static int
1700 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1701 {
1702 	struct aq_hw_cfg_s *cfg =
1703 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1704 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1705 	int ret = 0;
1706 	int i;
1707 
1708 	PMD_INIT_FUNC_TRACE();
1709 
1710 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1711 
1712 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1713 
1714 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1715 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1716 
1717 	if (mask & ETH_VLAN_EXTEND_MASK)
1718 		ret = -ENOTSUP;
1719 
1720 	return ret;
1721 }
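/*
 * Usage sketch (hypothetical): applications reach this callback via
 * the generic ethdev call, e.g.
 *
 *	rte_eth_dev_set_vlan_offload(port_id,
 *				     ETH_VLAN_STRIP_OFFLOAD |
 *				     ETH_VLAN_FILTER_OFFLOAD);
 *
 * The ethdev layer passes down a mask of the settings that changed;
 * ETH_VLAN_EXTEND_MASK is rejected here with -ENOTSUP.
 */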
1722 
1723 static int
1724 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1725 		  uint16_t tpid)
1726 {
1727 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1728 	int err = 0;
1729 
1730 	PMD_INIT_FUNC_TRACE();
1731 
1732 	switch (vlan_type) {
1733 	case ETH_VLAN_TYPE_INNER:
1734 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1735 		break;
1736 	case ETH_VLAN_TYPE_OUTER:
1737 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1738 		break;
1739 	default:
1740 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1741 		err = -ENOTSUP;
1742 	}
1743 
1744 	return err;
1745 }
1746 
1747 static void
1748 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1749 {
1750 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1751 
1752 	PMD_INIT_FUNC_TRACE();
1753 
1754 	if (queue_id >= dev->data->nb_rx_queues) {
1755 		PMD_DRV_LOG(ERR, "Invalid queue id");
1756 		return;
1757 	}
1758 
1759 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1760 }
1761 
1762 static int
1763 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1764 			  struct rte_ether_addr *mc_addr_set,
1765 			  uint32_t nb_mc_addr)
1766 {
1767 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1768 	u32 i;
1769 
1770 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1771 		return -EINVAL;
1772 
1773 	/* Update whole uc filters table */
1774 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1775 		u8 *mac_addr = NULL;
1776 		u32 l = 0, h = 0;
1777 
1778 		if (i < nb_mc_addr) {
1779 			mac_addr = mc_addr_set[i].addr_bytes;
1780 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1781 				(mac_addr[4] << 8) | mac_addr[5];
1782 			h = (mac_addr[0] << 8) | mac_addr[1];
1783 		}
1784 
1785 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1786 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1787 							HW_ATL_B0_MAC_MIN + i);
1788 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1789 							HW_ATL_B0_MAC_MIN + i);
1790 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1791 					   HW_ATL_B0_MAC_MIN + i);
1792 	}
1793 
1794 	return 0;
1795 }
1796 
1797 static int
1798 atl_reta_update(struct rte_eth_dev *dev,
1799 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1800 		   uint16_t reta_size)
1801 {
1802 	int i;
1803 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1804 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1805 
1806 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1807 		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1808 					dev->data->nb_rx_queues - 1);
1809 
1810 	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1811 	return 0;
1812 }
1813 
1814 static int
1815 atl_reta_query(struct rte_eth_dev *dev,
1816 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1817 		    uint16_t reta_size)
1818 {
1819 	int i;
1820 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1821 
1822 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1823 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1824 	reta_conf->mask = ~0U;
1825 	return 0;
1826 }
1827 
1828 static int
1829 atl_rss_hash_update(struct rte_eth_dev *dev,
1830 				 struct rte_eth_rss_conf *rss_conf)
1831 {
1832 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1833 	struct aq_hw_cfg_s *cfg =
1834 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1835 	static u8 def_rss_key[40] = {
1836 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1837 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1838 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1839 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1840 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1841 	};
1842 
1843 	cfg->is_rss = !!rss_conf->rss_hf;
1844 	if (rss_conf->rss_key) {
1845 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1846 		       rss_conf->rss_key_len);
1847 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1848 	} else {
1849 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1850 		       sizeof(def_rss_key));
1851 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1852 	}
1853 
1854 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1855 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1856 	return 0;
1857 }
1858 
1859 static int
1860 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1861 				 struct rte_eth_rss_conf *rss_conf)
1862 {
1863 	struct aq_hw_cfg_s *cfg =
1864 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1865 
1866 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1867 	if (rss_conf->rss_key) {
1868 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1869 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1870 		       rss_conf->rss_key_len);
1871 	}
1872 
1873 	return 0;
1874 }
1875 
1876 static bool
1877 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1878 {
1879 	if (strcmp(dev->device->driver->name, drv->driver.name))
1880 		return false;
1881 
1882 	return true;
1883 }
1884 
1885 bool
1886 is_atlantic_supported(struct rte_eth_dev *dev)
1887 {
1888 	return is_device_supported(dev, &rte_atl_pmd);
1889 }
1890 
1891 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1892 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1893 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1894 
1895 RTE_INIT(atl_init_log)
1896 {
1897 	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1898 	if (atl_logtype_init >= 0)
1899 		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1900 	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1901 	if (atl_logtype_driver >= 0)
1902 		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
1903 }
1904