/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>
#include <rte_alarm.h>

#include "atl_ethdev.h"
#include "atl_common.h"
#include "atl_hw_regs.h"
#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);

static int  atl_dev_configure(struct rte_eth_dev *dev);
static int  atl_dev_start(struct rte_eth_dev *dev);
static void atl_dev_stop(struct rte_eth_dev *dev);
static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
static void atl_dev_close(struct rte_eth_dev *dev);
static int  atl_dev_reset(struct rte_eth_dev *dev);
static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);

static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);

static void atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);

static void atl_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN stuff */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
				     uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);

/* Regs */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);


static int atl_add_mac_addr(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
					   struct ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);


static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
int atl_logtype_init;
int atl_logtype_driver;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};

#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

#define SFP_EEPROM_SIZE 0x100

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};

enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};

#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};

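/*
 * Each table entry pairs a counter name with its byte offset inside the
 * statistics structure it belongs to, so atl_dev_xstats_get() can fetch
 * every value generically instead of naming each field.  Illustrative
 * sketch (not part of the driver): fetching entry i reduces to
 *
 *	value = *(u64 *)((uint8_t *)base + atl_xstats_tbl[i].offset);
 *
 * where base is &hw->curr_stats for XSTATS_TYPE_MSM entries and the
 * firmware response buffer for XSTATS_TYPE_MACSEC entries.
 */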
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};

static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};

static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}

static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}

static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}

static int
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
{
	struct atl_adapter *adapter =
		(struct atl_adapter *)eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;
	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
	/* Hardware configuration - hardcoded defaults */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M;

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
		return -ENOMEM;
	}

	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err)
		return err;

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)
		return -EINVAL;

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable supported interrupts */
	atl_enable_intr(eth_dev);

	return err;
}

static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}

static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}

static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/*
 * Configure device link speed and set the link up.
 * Returns 0 on success.
 */
static int
atl_dev_start(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int status;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u: fixed speed is not supported",
				dev->data->port_id);
		return -EINVAL;
	}

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Reinitialize the adapter: reset, then init and start the hardware */
	status = atl_reset_hw(hw);
	if (status != 0)
		return -EIO;

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
	if (err)
		return err;

	hw_atl_b0_hw_start(hw);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					ATL_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
			return -1;
		}
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* initialize transmission unit */
	atl_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		hw->fw_ver_actual >> 24,
		(hw->fw_ver_actual >> 16) & 0xFF,
		hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = atl_dev_set_link_up(dev);
	if (err)
		goto error;

	err = hw->aq_fw_ops->update_link_status(hw);
	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
		else
			atl_dev_lsc_interrupt_setup(dev, false);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	atl_enable_intr(dev);

	return 0;

error:
	atl_stop_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

/*
 * Set device link up: advertise the configured (or all supported) speeds.
 */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;
	} else {
		if (link_speeds & ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
		if (link_speeds & ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
		if (link_speeds & ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
		if (link_speeds & ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
		if (link_speeds & ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;
	}

	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}

/*
 * Set device link down: advertise no speeds, forcing the link down.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}

/*
 * Reset and stop device.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}

static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_atl_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}

static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Create the SC/SA structures from the parameters provided via the
	 * atl_macsec_* API and push them to the firmware.
	 */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* only a single TX SC is supported */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */

	/*
	 * Create the SCI (Secure Channel Identifier).
	 * The SCI is constructed from the source MAC and the port identifier.
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
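	/*
	 * Per IEEE 802.1AE the 64-bit SCI is the 48-bit source MAC address
	 * followed by a 16-bit port identifier. The shifts above repack the
	 * MAC bits held in mac_sa[] into the top 48 bits of the SCI and
	 * place the port identifier (fixed to 1 here) in the low 16 bits.
	 */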

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0;

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}
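
/*
 * A minimal usage sketch (hypothetical application code, assuming one
 * TX and one RX secure channel with 128-bit keys):
 *
 *	atl_macsec_enable(dev, 1, 1);             // encryption + replay protection
 *	atl_macsec_config_txsc(dev, local_mac);
 *	atl_macsec_config_rxsc(dev, peer_mac, 1); // port identifier 1
 *	atl_macsec_select_txsa(dev, 0, 0, 1, tx_key);
 *	atl_macsec_select_rxsa(dev, 0, 0, 1, rx_key);
 *
 * The settings only reach the firmware once atl_dev_configure_macsec()
 * runs, e.g. from the delayed alarm scheduled in atl_dev_link_update().
 */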

int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

	return 0;
}

int atl_macsec_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

	return 0;
}

int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, ETHER_ADDR_LEN);

	return 0;
}

int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}

int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}

int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}

static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}

static void
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset the driver-maintained counter copies */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
}

static int
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned int size)
{
	unsigned int i;

	if (!xstats_names)
		return RTE_DIM(atl_xstats_tbl);

	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
			RTE_ETH_XSTATS_NAME_SIZE);

	return i;
}
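
/*
 * The function above follows the usual two-step xstats contract: passing
 * a NULL xstats_names pointer only queries the number of entries, after
 * which the caller allocates the array and calls again.  Hypothetical
 * caller sketch (error handling omitted):
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *	rte_eth_xstats_get_names(port_id, names, n);
 */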

static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;

	if (!stats)
		return 0;

	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (err)
				goto done;
			stats[i].value = *(u64 *)((uint8_t *)&resp.stats +
					 atl_xstats_tbl[i].offset);
			break;
		}
	}
done:
	return i;
}

static int
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
	unsigned int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	if (ret)
		return -EIO;

	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);

	ret += 1; /* account for the terminating null byte */

	if (fw_size < ret)
		return ret;

	return 0;
}
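
/*
 * The firmware packs its version as major.minor.build into one 32-bit
 * word: bits 31-24 major, bits 23-16 minor, bits 15-0 build.  For an
 * assumed value fw_ver = 0x03010022 the string above would be "3.1.34".
 */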

static void
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
}

static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == atl_recv_pkts)
		return ptypes;

	return NULL;
}

static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);
}

/* Return 0 if the link status changed, -1 if it did not */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	int err = 0;

	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}

static void
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
}

static void
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
}

static void
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
}

static void
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once, during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}

static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}
/**
 * It reads and then prints the current link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, "Port %d: Link Down",
				(int)(dev->data->port_id));
	}

#ifdef DEBUG
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
#endif

	PMD_DRV_LOG(INFO, "Link speed: %u", link.link_speed);
}

/*
 * It executes link_update after an interrupt has occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify the application if the link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check whether the MACsec keys have expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req fail");
			goto done;
		}
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			_rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	atl_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}

/**
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
atl_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}

static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}

static int
atl_dev_get_eeprom(struct rte_eth_dev *dev,
		   struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->get_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

static int
atl_dev_set_eeprom(struct rte_eth_dev *dev,
		   struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->set_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}

static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_FC_NONE;
	else if ((hw->aq_nic_cfg->flow_control &
		 (AQ_NIC_FC_RX | AQ_NIC_FC_TX)) ==
		 (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
		fc_conf->mode = RTE_FC_FULL;
	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_FC_TX_PAUSE;

	return 0;
}

static int
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	if (hw->aq_fw_ops->set_flow_control == NULL)
		return -ENOTSUP;

	if (fc_conf->mode == RTE_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);

	return 0;
}

static int
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err;

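	/*
	 * The six MAC bytes are split across two filter registers: the MSW
	 * holds bytes 0-1, the LSW holds bytes 2-5.  Illustration with an
	 * assumed address 00:17:b6:33:44:55: h = 0x0017, l = 0xb6334455.
	 */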
	if (mac_addr) {
		h = (mac_addr[0] << 8) | (mac_addr[1]);
		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			(mac_addr[4] << 8) | mac_addr[5];
	}

	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	if (enable)
		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

	return err;
}

static int
atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
			uint32_t index, uint32_t pool __rte_unused)
{
	if (is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
}

static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}

static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}

static int
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	atl_dev_info_get(dev, &dev_info);

	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}

static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found, so there is nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exists, or was just removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter slot for the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter slot left for the new VLAN_ID */
		err = -ENOMEM;
		goto exit;
	}

	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promiscuous mode if the filter table is empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
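
/*
 * Example (hypothetical): rte_eth_dev_vlan_filter(port_id, 100, 1) ends up
 * here, claims the first free filter slot for VLAN 100 and clears VLAN
 * promiscuous mode; removing the last configured ID re-enables it, so VLAN
 * traffic keeps flowing when no explicit filters are configured.
 */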

static int
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
	}
	return 0;
}

static int
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	int i;

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	if (mask & ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
}

static int
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
		break;
	case ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
		err = -ENOTSUP;
	}

	return err;
}

static void
atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue id");
		return;
	}

	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
}

static int
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 i;

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
		return -EINVAL;

	/* Update whole uc filters table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;
		u32 l = 0, h = 0;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];
		}

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
	}

	return 0;
}

static int
atl_reta_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}

static int
atl_reta_query(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}

static int
atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		/* Reject keys longer than the 40-byte hardware hash key */
		if (rss_conf->rss_key_len > HW_ATL_B0_RSS_HASHKEY_BITS / 8)
			return -EINVAL;
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
	} else {
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
	}

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
	return 0;
}

static int
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
	}

	return 0;
}

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}

RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");

RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}