1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_alarm.h>
8 
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
12 #include "atl_logs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
16 
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
19 
20 static int  atl_dev_configure(struct rte_eth_dev *dev);
21 static int  atl_dev_start(struct rte_eth_dev *dev);
22 static void atl_dev_stop(struct rte_eth_dev *dev);
23 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
24 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
25 static void atl_dev_close(struct rte_eth_dev *dev);
26 static int  atl_dev_reset(struct rte_eth_dev *dev);
27 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
28 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
29 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
30 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
31 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
32 
33 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
34 				    struct rte_eth_xstat_name *xstats_names,
35 				    unsigned int size);
36 
37 static int atl_dev_stats_get(struct rte_eth_dev *dev,
38 				struct rte_eth_stats *stats);
39 
40 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
41 			      struct rte_eth_xstat *stats, unsigned int n);
42 
43 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
44 
45 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 			      size_t fw_size);
47 
48 static void atl_dev_info_get(struct rte_eth_dev *dev,
49 			       struct rte_eth_dev_info *dev_info);
50 
51 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
52 
53 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
54 
55 /* VLAN stuff */
56 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
57 		uint16_t vlan_id, int on);
58 
59 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
60 
61 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
62 				     uint16_t queue_id, int on);
63 
64 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
65 			     enum rte_vlan_type vlan_type, uint16_t tpid);
66 
67 /* EEPROM */
68 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
69 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
70 			      struct rte_dev_eeprom_info *eeprom);
71 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
72 			      struct rte_dev_eeprom_info *eeprom);
73 
74 /* Regs */
75 static int atl_dev_get_regs(struct rte_eth_dev *dev,
76 			    struct rte_dev_reg_info *regs);
77 
78 /* Flow control */
79 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
80 			       struct rte_eth_fc_conf *fc_conf);
81 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
82 			       struct rte_eth_fc_conf *fc_conf);
83 
84 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
85 
86 /* Interrupts */
87 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
88 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
89 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
90 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
91 				    struct rte_intr_handle *handle);
92 static void atl_dev_interrupt_handler(void *param);
93 
95 static int atl_add_mac_addr(struct rte_eth_dev *dev,
96 			    struct ether_addr *mac_addr,
97 			    uint32_t index, uint32_t pool);
98 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
99 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
100 					   struct ether_addr *mac_addr);
101 
102 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
103 				    struct ether_addr *mc_addr_set,
104 				    uint32_t nb_mc_addr);
105 
106 /* RSS */
107 static int atl_reta_update(struct rte_eth_dev *dev,
108 			     struct rte_eth_rss_reta_entry64 *reta_conf,
109 			     uint16_t reta_size);
110 static int atl_reta_query(struct rte_eth_dev *dev,
111 			    struct rte_eth_rss_reta_entry64 *reta_conf,
112 			    uint16_t reta_size);
113 static int atl_rss_hash_update(struct rte_eth_dev *dev,
114 				 struct rte_eth_rss_conf *rss_conf);
115 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
116 				   struct rte_eth_rss_conf *rss_conf);
117 
119 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
120 	struct rte_pci_device *pci_dev);
121 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
122 
126 int atl_logtype_init;
127 int atl_logtype_driver;
128 
129 /*
130  * The set of PCI devices this driver supports
131  */
132 static const struct rte_pci_id pci_id_atl_map[] = {
133 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
134 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
135 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
136 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
137 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
138 
139 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
140 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
141 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
142 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
143 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
144 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
145 
146 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
147 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
148 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
149 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
150 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
151 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
152 
153 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
154 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
155 	{ .vendor_id = 0, /* sentinel */ },
156 };
157 
158 static struct rte_pci_driver rte_atl_pmd = {
159 	.id_table = pci_id_atl_map,
160 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
161 		     RTE_PCI_DRV_IOVA_AS_VA,
162 	.probe = eth_atl_pci_probe,
163 	.remove = eth_atl_pci_remove,
164 };
165 
166 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
167 			| DEV_RX_OFFLOAD_IPV4_CKSUM \
168 			| DEV_RX_OFFLOAD_UDP_CKSUM \
169 			| DEV_RX_OFFLOAD_TCP_CKSUM \
170 			| DEV_RX_OFFLOAD_JUMBO_FRAME \
171 			| DEV_RX_OFFLOAD_MACSEC_STRIP \
172 			| DEV_RX_OFFLOAD_VLAN_FILTER)
173 
174 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
175 			| DEV_TX_OFFLOAD_IPV4_CKSUM \
176 			| DEV_TX_OFFLOAD_UDP_CKSUM \
177 			| DEV_TX_OFFLOAD_TCP_CKSUM \
178 			| DEV_TX_OFFLOAD_TCP_TSO \
179 			| DEV_TX_OFFLOAD_MACSEC_INSERT \
180 			| DEV_TX_OFFLOAD_MULTI_SEGS)
181 
182 static const struct rte_eth_desc_lim rx_desc_lim = {
183 	.nb_max = ATL_MAX_RING_DESC,
184 	.nb_min = ATL_MIN_RING_DESC,
185 	.nb_align = ATL_RXD_ALIGN,
186 };
187 
188 static const struct rte_eth_desc_lim tx_desc_lim = {
189 	.nb_max = ATL_MAX_RING_DESC,
190 	.nb_min = ATL_MIN_RING_DESC,
191 	.nb_align = ATL_TXD_ALIGN,
192 	.nb_seg_max = ATL_TX_MAX_SEG,
193 	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
194 };
195 
196 enum atl_xstats_type {
197 	XSTATS_TYPE_MSM = 0,
198 	XSTATS_TYPE_MACSEC,
199 };
200 
201 #define ATL_XSTATS_FIELD(name) { \
202 	#name, \
203 	offsetof(struct aq_stats_s, name), \
204 	XSTATS_TYPE_MSM \
205 }
206 
207 #define ATL_MACSEC_XSTATS_FIELD(name) { \
208 	#name, \
209 	offsetof(struct macsec_stats, name), \
210 	XSTATS_TYPE_MACSEC \
211 }
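
/*
 * Illustrative note (editorial, not in the original driver): these macros
 * build generic { name, offset, type } descriptors so one lookup routine
 * can read any counter. For example, ATL_XSTATS_FIELD(uprc) expands to
 *
 *	{ "uprc", offsetof(struct aq_stats_s, uprc), XSTATS_TYPE_MSM }
 *
 * and atl_dev_xstats_get() below resolves it generically:
 *
 *	value = *(u64 *)((uint8_t *)&hw->curr_stats + entry->offset);
 */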
212 
213 struct atl_xstats_tbl_s {
214 	const char *name;
215 	unsigned int offset;
216 	enum atl_xstats_type type;
217 };
218 
219 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
220 	ATL_XSTATS_FIELD(uprc),
221 	ATL_XSTATS_FIELD(mprc),
222 	ATL_XSTATS_FIELD(bprc),
223 	ATL_XSTATS_FIELD(erpt),
224 	ATL_XSTATS_FIELD(uptc),
225 	ATL_XSTATS_FIELD(mptc),
226 	ATL_XSTATS_FIELD(bptc),
227 	ATL_XSTATS_FIELD(erpr),
228 	ATL_XSTATS_FIELD(ubrc),
229 	ATL_XSTATS_FIELD(ubtc),
230 	ATL_XSTATS_FIELD(mbrc),
231 	ATL_XSTATS_FIELD(mbtc),
232 	ATL_XSTATS_FIELD(bbrc),
233 	ATL_XSTATS_FIELD(bbtc),
234 	/* Ingress Common Counters */
235 	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
236 	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
237 	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
238 	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
239 	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
240 	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
241 	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
242 	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
243 	/* Ingress SA Counters */
244 	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
245 	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
246 	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
247 	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
248 	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
249 	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
250 	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
251 	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
252 	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
253 	/* Egress Common Counters */
254 	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
255 	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
256 	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
257 	ATL_MACSEC_XSTATS_FIELD(out_too_long),
258 	/* Egress SC Counters */
259 	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
260 	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
261 	/* Egress SA Counters */
262 	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
263 	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
264 	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
265 	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
266 };
267 
268 static const struct eth_dev_ops atl_eth_dev_ops = {
269 	.dev_configure	      = atl_dev_configure,
270 	.dev_start	      = atl_dev_start,
271 	.dev_stop	      = atl_dev_stop,
272 	.dev_set_link_up      = atl_dev_set_link_up,
273 	.dev_set_link_down    = atl_dev_set_link_down,
274 	.dev_close	      = atl_dev_close,
275 	.dev_reset	      = atl_dev_reset,
276 
277 	/* PROMISC */
278 	.promiscuous_enable   = atl_dev_promiscuous_enable,
279 	.promiscuous_disable  = atl_dev_promiscuous_disable,
280 	.allmulticast_enable  = atl_dev_allmulticast_enable,
281 	.allmulticast_disable = atl_dev_allmulticast_disable,
282 
283 	/* Link */
284 	.link_update	      = atl_dev_link_update,
285 
286 	.get_reg              = atl_dev_get_regs,
287 
288 	/* Stats */
289 	.stats_get	      = atl_dev_stats_get,
290 	.xstats_get	      = atl_dev_xstats_get,
291 	.xstats_get_names     = atl_dev_xstats_get_names,
292 	.stats_reset	      = atl_dev_stats_reset,
293 	.xstats_reset	      = atl_dev_stats_reset,
294 
295 	.fw_version_get       = atl_fw_version_get,
296 	.dev_infos_get	      = atl_dev_info_get,
297 	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
298 
299 	.mtu_set              = atl_dev_mtu_set,
300 
301 	/* VLAN */
302 	.vlan_filter_set      = atl_vlan_filter_set,
303 	.vlan_offload_set     = atl_vlan_offload_set,
304 	.vlan_tpid_set        = atl_vlan_tpid_set,
305 	.vlan_strip_queue_set = atl_vlan_strip_queue_set,
306 
307 	/* Queue Control */
308 	.rx_queue_start	      = atl_rx_queue_start,
309 	.rx_queue_stop	      = atl_rx_queue_stop,
310 	.rx_queue_setup       = atl_rx_queue_setup,
311 	.rx_queue_release     = atl_rx_queue_release,
312 
313 	.tx_queue_start	      = atl_tx_queue_start,
314 	.tx_queue_stop	      = atl_tx_queue_stop,
315 	.tx_queue_setup       = atl_tx_queue_setup,
316 	.tx_queue_release     = atl_tx_queue_release,
317 
318 	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
319 	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
320 
321 	.rx_queue_count       = atl_rx_queue_count,
322 	.rx_descriptor_status = atl_dev_rx_descriptor_status,
323 	.tx_descriptor_status = atl_dev_tx_descriptor_status,
324 
325 	/* EEPROM */
326 	.get_eeprom_length    = atl_dev_get_eeprom_length,
327 	.get_eeprom           = atl_dev_get_eeprom,
328 	.set_eeprom           = atl_dev_set_eeprom,
329 
330 	/* Flow Control */
331 	.flow_ctrl_get	      = atl_flow_ctrl_get,
332 	.flow_ctrl_set	      = atl_flow_ctrl_set,
333 
334 	/* MAC */
335 	.mac_addr_add	      = atl_add_mac_addr,
336 	.mac_addr_remove      = atl_remove_mac_addr,
337 	.mac_addr_set	      = atl_set_default_mac_addr,
338 	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
339 	.rxq_info_get	      = atl_rxq_info_get,
340 	.txq_info_get	      = atl_txq_info_get,
341 
342 	.reta_update          = atl_reta_update,
343 	.reta_query           = atl_reta_query,
344 	.rss_hash_update      = atl_rss_hash_update,
345 	.rss_hash_conf_get    = atl_rss_hash_conf_get,
346 };
347 
348 static inline int32_t
349 atl_reset_hw(struct aq_hw_s *hw)
350 {
351 	return hw_atl_b0_hw_reset(hw);
352 }
353 
354 static inline void
355 atl_enable_intr(struct rte_eth_dev *dev)
356 {
357 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
358 
359 	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
360 }
361 
362 static void
363 atl_disable_intr(struct aq_hw_s *hw)
364 {
365 	PMD_INIT_FUNC_TRACE();
366 	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
367 }
368 
369 static int
370 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
371 {
372 	struct atl_adapter *adapter =
373 		(struct atl_adapter *)eth_dev->data->dev_private;
374 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
375 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
376 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
377 	int err = 0;
378 
379 	PMD_INIT_FUNC_TRACE();
380 
381 	eth_dev->dev_ops = &atl_eth_dev_ops;
382 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
383 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
384 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
385 
386 	/* For secondary processes, the primary process has done all the work */
387 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
388 		return 0;
389 
390 	/* Vendor and Device ID need to be set before init of shared code */
391 	hw->device_id = pci_dev->id.device_id;
392 	hw->vendor_id = pci_dev->id.vendor_id;
393 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
394 
	/* Hardware configuration: hardcoded defaults */
396 	adapter->hw_cfg.is_lro = false;
397 	adapter->hw_cfg.wol = false;
398 	adapter->hw_cfg.is_rss = false;
399 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
400 
401 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
402 			  AQ_NIC_RATE_5G |
403 			  AQ_NIC_RATE_2G5 |
404 			  AQ_NIC_RATE_1G |
405 			  AQ_NIC_RATE_100M;
406 
407 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
408 	adapter->hw_cfg.aq_rss.indirection_table_size =
409 		HW_ATL_B0_RSS_REDIRECTION_MAX;
410 
411 	hw->aq_nic_cfg = &adapter->hw_cfg;
412 
413 	/* disable interrupt */
414 	atl_disable_intr(hw);
415 
416 	/* Allocate memory for storing MAC addresses */
417 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
418 	if (eth_dev->data->mac_addrs == NULL) {
419 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
420 		return -ENOMEM;
421 	}
422 
	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return err;
	}

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -EINVAL;
	}
431 
432 	/* Reset the hw statistics */
433 	atl_dev_stats_reset(eth_dev);
434 
435 	rte_intr_callback_register(intr_handle,
436 				   atl_dev_interrupt_handler, eth_dev);
437 
438 	/* enable uio/vfio intr/eventfd mapping */
439 	rte_intr_enable(intr_handle);
440 
	/* enable hardware interrupts */
442 	atl_enable_intr(eth_dev);
443 
444 	return err;
445 }
446 
447 static int
448 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
449 {
450 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
451 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
452 	struct aq_hw_s *hw;
453 
454 	PMD_INIT_FUNC_TRACE();
455 
456 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
457 		return -EPERM;
458 
459 	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
460 
461 	if (hw->adapter_stopped == 0)
462 		atl_dev_close(eth_dev);
463 
464 	eth_dev->dev_ops = NULL;
465 	eth_dev->rx_pkt_burst = NULL;
466 	eth_dev->tx_pkt_burst = NULL;
467 
468 	/* disable uio intr before callback unregister */
469 	rte_intr_disable(intr_handle);
470 	rte_intr_callback_unregister(intr_handle,
471 				     atl_dev_interrupt_handler, eth_dev);
472 
473 	rte_free(eth_dev->data->mac_addrs);
474 	eth_dev->data->mac_addrs = NULL;
475 
476 	return 0;
477 }
478 
479 static int
480 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
481 	struct rte_pci_device *pci_dev)
482 {
483 	return rte_eth_dev_pci_generic_probe(pci_dev,
484 		sizeof(struct atl_adapter), eth_atl_dev_init);
485 }
486 
487 static int
488 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
489 {
490 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
491 }
492 
493 static int
494 atl_dev_configure(struct rte_eth_dev *dev)
495 {
496 	struct atl_interrupt *intr =
497 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
498 
499 	PMD_INIT_FUNC_TRACE();
500 
501 	/* set flag to update link status after init */
502 	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
503 
504 	return 0;
505 }
506 
507 /*
508  * Configure device link speed and setup link.
509  * It returns 0 on success.
510  */
511 static int
512 atl_dev_start(struct rte_eth_dev *dev)
513 {
514 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
515 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
516 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
517 	uint32_t intr_vector = 0;
518 	int status;
519 	int err;
520 
521 	PMD_INIT_FUNC_TRACE();
522 
523 	/* set adapter started */
524 	hw->adapter_stopped = 0;
525 
526 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
527 		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fixed speed is not supported",
529 				dev->data->port_id);
530 		return -EINVAL;
531 	}
532 
533 	/* disable uio/vfio intr/eventfd mapping */
534 	rte_intr_disable(intr_handle);
535 
536 	/* reinitialize adapter
537 	 * this calls reset and start
538 	 */
539 	status = atl_reset_hw(hw);
540 	if (status != 0)
541 		return -EIO;
542 
	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
	if (err)
		return err;

	hw_atl_b0_hw_start(hw);
546 	/* check and configure queue intr-vector mapping */
547 	if ((rte_intr_cap_multiple(intr_handle) ||
548 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
549 	    dev->data->dev_conf.intr_conf.rxq != 0) {
550 		intr_vector = dev->data->nb_rx_queues;
551 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
552 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
553 					ATL_MAX_INTR_QUEUE_NUM);
554 			return -ENOTSUP;
555 		}
556 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
557 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
558 			return -1;
559 		}
560 	}
561 
562 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
563 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
564 				    dev->data->nb_rx_queues * sizeof(int), 0);
565 		if (intr_handle->intr_vec == NULL) {
566 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
567 				     " intr_vec", dev->data->nb_rx_queues);
568 			return -ENOMEM;
569 		}
570 	}
571 
572 	/* initialize transmission unit */
573 	atl_tx_init(dev);
574 
575 	/* This can fail when allocating mbufs for descriptor rings */
576 	err = atl_rx_init(dev);
577 	if (err) {
578 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
579 		goto error;
580 	}
581 
582 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
583 		hw->fw_ver_actual >> 24,
584 		(hw->fw_ver_actual >> 16) & 0xFF,
585 		hw->fw_ver_actual & 0xFFFF);
586 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
587 
588 	err = atl_start_queues(dev);
589 	if (err < 0) {
590 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
591 		goto error;
592 	}
593 
	err = atl_dev_set_link_up(dev);
	if (err)
		goto error;

	err = hw->aq_fw_ops->update_link_status(hw);
	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
605 
606 	if (rte_intr_allow_others(intr_handle)) {
607 		/* check if lsc interrupt is enabled */
608 		if (dev->data->dev_conf.intr_conf.lsc != 0)
609 			atl_dev_lsc_interrupt_setup(dev, true);
610 		else
611 			atl_dev_lsc_interrupt_setup(dev, false);
612 	} else {
613 		rte_intr_callback_unregister(intr_handle,
614 					     atl_dev_interrupt_handler, dev);
615 		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "LSC interrupt cannot be enabled:"
				     " no interrupt multiplexing available");
618 	}
619 
620 	/* check if rxq interrupt is enabled */
621 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
622 	    rte_intr_dp_is_en(intr_handle))
623 		atl_dev_rxq_interrupt_setup(dev);
624 
625 	/* enable uio/vfio intr/eventfd mapping */
626 	rte_intr_enable(intr_handle);
627 
	/* re-enable interrupts after the HW reset */
629 	atl_enable_intr(dev);
630 
631 	return 0;
632 
633 error:
634 	atl_stop_queues(dev);
635 	return -EIO;
636 }
637 
638 /*
639  * Stop device: disable rx and tx functions to allow for reconfiguring.
640  */
641 static void
642 atl_dev_stop(struct rte_eth_dev *dev)
643 {
644 	struct rte_eth_link link;
645 	struct aq_hw_s *hw =
646 		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
647 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
648 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
649 
650 	PMD_INIT_FUNC_TRACE();
651 
652 	/* disable interrupts */
653 	atl_disable_intr(hw);
654 
655 	/* reset the NIC */
656 	atl_reset_hw(hw);
657 	hw->adapter_stopped = 1;
658 
659 	atl_stop_queues(dev);
660 
661 	/* Clear stored conf */
662 	dev->data->scattered_rx = 0;
663 	dev->data->lro = 0;
664 
665 	/* Clear recorded link status */
666 	memset(&link, 0, sizeof(link));
667 	rte_eth_linkstatus_set(dev, &link);
668 
669 	if (!rte_intr_allow_others(intr_handle))
670 		/* resume to the default handler */
671 		rte_intr_callback_register(intr_handle,
672 					   atl_dev_interrupt_handler,
673 					   (void *)dev);
674 
675 	/* Clean datapath event and queue/vec mapping */
676 	rte_intr_efd_disable(intr_handle);
677 	if (intr_handle->intr_vec != NULL) {
678 		rte_free(intr_handle->intr_vec);
679 		intr_handle->intr_vec = NULL;
680 	}
681 }
682 
683 /*
684  * Set device link up: enable tx.
685  */
686 static int
687 atl_dev_set_link_up(struct rte_eth_dev *dev)
688 {
689 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
690 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
691 	uint32_t speed_mask = 0;
692 
693 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
694 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
695 	} else {
696 		if (link_speeds & ETH_LINK_SPEED_10G)
697 			speed_mask |= AQ_NIC_RATE_10G;
698 		if (link_speeds & ETH_LINK_SPEED_5G)
699 			speed_mask |= AQ_NIC_RATE_5G;
700 		if (link_speeds & ETH_LINK_SPEED_1G)
701 			speed_mask |= AQ_NIC_RATE_1G;
702 		if (link_speeds & ETH_LINK_SPEED_2_5G)
703 			speed_mask |=  AQ_NIC_RATE_2G5;
704 		if (link_speeds & ETH_LINK_SPEED_100M)
705 			speed_mask |= AQ_NIC_RATE_100M;
706 	}
707 
708 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
709 }
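
/*
 * Usage sketch (hypothetical application code): the speed mask above is
 * derived from rte_eth_conf.link_speeds. Restricting autonegotiation to
 * 1G and 10G, for example, maps to AQ_NIC_RATE_1G | AQ_NIC_RATE_10G:
 *
 *	struct rte_eth_conf conf = { 0 };
 *	conf.link_speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * ETH_LINK_SPEED_FIXED is rejected earlier, in atl_dev_start().
 */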
710 
711 /*
712  * Set device link down: disable tx.
713  */
714 static int
715 atl_dev_set_link_down(struct rte_eth_dev *dev)
716 {
717 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
718 
719 	return hw->aq_fw_ops->set_link_speed(hw, 0);
720 }
721 
722 /*
723  * Reset and stop device.
724  */
725 static void
726 atl_dev_close(struct rte_eth_dev *dev)
727 {
728 	PMD_INIT_FUNC_TRACE();
729 
730 	atl_dev_stop(dev);
731 
732 	atl_free_queues(dev);
733 }
734 
735 static int
736 atl_dev_reset(struct rte_eth_dev *dev)
737 {
738 	int ret;
739 
740 	ret = eth_atl_dev_uninit(dev);
741 	if (ret)
742 		return ret;
743 
744 	ret = eth_atl_dev_init(dev);
745 
746 	return ret;
747 }
748 
749 static int
750 atl_dev_configure_macsec(struct rte_eth_dev *dev)
751 {
752 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
753 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
754 	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
755 	struct macsec_msg_fw_request msg_macsec;
756 	struct macsec_msg_fw_response response;
757 
758 	if (!aqcfg->common.macsec_enabled ||
759 	    hw->aq_fw_ops->send_macsec_req == NULL)
760 		return 0;
761 
762 	memset(&msg_macsec, 0, sizeof(msg_macsec));
763 
	/* Create the SC/SA structures from the parameters provided by DPDK */
765 
766 	/* Configure macsec */
767 	msg_macsec.msg_type = macsec_cfg_msg;
768 	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
769 	msg_macsec.cfg.interrupts_enabled = 1;
770 
771 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
772 
773 	if (response.result)
774 		return -1;
775 
776 	memset(&msg_macsec, 0, sizeof(msg_macsec));
777 
778 	/* Configure TX SC */
779 
780 	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* only one TX SC is used, index is always 0 */
782 	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
783 
784 	/* MAC addr for TX */
785 	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
786 	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
787 	msg_macsec.txsc.sa_mask = 0x3f;
788 
789 	msg_macsec.txsc.da_mask = 0;
790 	msg_macsec.txsc.tci = 0x0B;
791 	msg_macsec.txsc.curr_an = 0; /* SA index which currently used */
792 
793 	/*
	 * Create the SCI (Secure Channel Identifier).
	 * The SCI is constructed from the source MAC and the port identifier.
796 	 */
797 	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
798 			       (msg_macsec.txsc.mac_sa[0] >> 16);
799 	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
800 
801 	uint32_t port_identifier = 1;
802 
803 	msg_macsec.txsc.sci[1] = sci_hi_part;
804 	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
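
	/*
	 * Worked example (illustrative, assuming a little-endian host):
	 * for source MAC 00:11:22:33:44:55 and port identifier 1,
	 * mac_sa[1] = 0x00000011 and mac_sa[0] = 0x22334455, giving
	 * sci[1] = 0x00112233 and sci[0] = 0x44550001, i.e.
	 * SCI = source MAC (6 bytes) || port identifier (2 bytes).
	 */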
805 
806 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
807 
808 	if (response.result)
809 		return -1;
810 
811 	memset(&msg_macsec, 0, sizeof(msg_macsec));
812 
813 	/* Configure RX SC */
814 
815 	msg_macsec.msg_type = macsec_add_rx_sc_msg;
816 	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
817 	msg_macsec.rxsc.replay_protect =
818 		aqcfg->common.replay_protection_enabled;
819 	msg_macsec.rxsc.anti_replay_window = 0;
820 
821 	/* MAC addr for RX */
822 	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
823 	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0;
825 
826 	msg_macsec.rxsc.sa_mask = 0;
827 
828 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
829 
830 	if (response.result)
831 		return -1;
832 
833 	memset(&msg_macsec, 0, sizeof(msg_macsec));
834 
	/* Configure TX SA */
836 
837 	msg_macsec.msg_type = macsec_add_tx_sa_msg;
838 	msg_macsec.txsa.index = aqcfg->txsa.idx;
839 	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
840 
841 	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
842 	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
843 	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
844 	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
845 
846 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
847 
848 	if (response.result)
849 		return -1;
850 
851 	memset(&msg_macsec, 0, sizeof(msg_macsec));
852 
853 	/* Configure RX SA */
854 
855 	msg_macsec.msg_type = macsec_add_rx_sa_msg;
856 	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
857 	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
858 
859 	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
860 	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
861 	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
862 	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
863 
864 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
865 
866 	if (response.result)
867 		return -1;
868 
869 	return 0;
870 }
871 
872 int atl_macsec_enable(struct rte_eth_dev *dev,
873 		      uint8_t encr, uint8_t repl_prot)
874 {
875 	struct aq_hw_cfg_s *cfg =
876 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
877 
878 	cfg->aq_macsec.common.macsec_enabled = 1;
879 	cfg->aq_macsec.common.encryption_enabled = encr;
880 	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
881 
882 	return 0;
883 }
884 
885 int atl_macsec_disable(struct rte_eth_dev *dev)
886 {
887 	struct aq_hw_cfg_s *cfg =
888 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
889 
890 	cfg->aq_macsec.common.macsec_enabled = 0;
891 
892 	return 0;
893 }
894 
895 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
896 {
897 	struct aq_hw_cfg_s *cfg =
898 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
899 
900 	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
901 	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, ETHER_ADDR_LEN);
902 
903 	return 0;
904 }
905 
906 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
907 			   uint8_t *mac, uint16_t pi)
908 {
909 	struct aq_hw_cfg_s *cfg =
910 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
911 
912 	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
913 	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, ETHER_ADDR_LEN);
914 	cfg->aq_macsec.rxsc.pi = pi;
915 
916 	return 0;
917 }
918 
919 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
920 			   uint8_t idx, uint8_t an,
921 			   uint32_t pn, uint8_t *key)
922 {
923 	struct aq_hw_cfg_s *cfg =
924 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
925 
926 	cfg->aq_macsec.txsa.idx = idx;
927 	cfg->aq_macsec.txsa.pn = pn;
928 	cfg->aq_macsec.txsa.an = an;
929 
930 	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
931 	return 0;
932 }
933 
934 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
935 			   uint8_t idx, uint8_t an,
936 			   uint32_t pn, uint8_t *key)
937 {
938 	struct aq_hw_cfg_s *cfg =
939 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
940 
941 	cfg->aq_macsec.rxsa.idx = idx;
942 	cfg->aq_macsec.rxsa.pn = pn;
943 	cfg->aq_macsec.rxsa.an = an;
944 
945 	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
946 	return 0;
947 }
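
/*
 * Control-path usage sketch (hypothetical application code; index, AN,
 * PN and key values are placeholders): the helpers above only cache the
 * MACsec configuration. It is pushed to firmware by
 * atl_dev_configure_macsec(), scheduled from the link-update alarm:
 *
 *	atl_macsec_enable(dev, 1, 1);              // encrypt + replay protect
 *	atl_macsec_config_txsc(dev, local_mac);
 *	atl_macsec_config_rxsc(dev, peer_mac, 1);  // port identifier 1
 *	atl_macsec_select_txsa(dev, 0, 0, 1, key); // idx 0, AN 0, next PN 1
 *	atl_macsec_select_rxsa(dev, 0, 0, 1, key);
 */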
948 
949 static int
950 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
951 {
952 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
953 	struct aq_hw_s *hw = &adapter->hw;
954 	struct atl_sw_stats *swstats = &adapter->sw_stats;
955 	unsigned int i;
956 
957 	hw->aq_fw_ops->update_stats(hw);
958 
959 	/* Fill out the rte_eth_stats statistics structure */
960 	stats->ipackets = hw->curr_stats.dma_pkt_rc;
961 	stats->ibytes = hw->curr_stats.dma_oct_rc;
962 	stats->imissed = hw->curr_stats.dpc;
963 	stats->ierrors = hw->curr_stats.erpt;
964 
965 	stats->opackets = hw->curr_stats.dma_pkt_tc;
966 	stats->obytes = hw->curr_stats.dma_oct_tc;
967 	stats->oerrors = 0;
968 
969 	stats->rx_nombuf = swstats->rx_nombuf;
970 
971 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
972 		stats->q_ipackets[i] = swstats->q_ipackets[i];
973 		stats->q_opackets[i] = swstats->q_opackets[i];
974 		stats->q_ibytes[i] = swstats->q_ibytes[i];
975 		stats->q_obytes[i] = swstats->q_obytes[i];
976 		stats->q_errors[i] = swstats->q_errors[i];
977 	}
978 	return 0;
979 }
980 
981 static void
982 atl_dev_stats_reset(struct rte_eth_dev *dev)
983 {
984 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
985 	struct aq_hw_s *hw = &adapter->hw;
986 
987 	hw->aq_fw_ops->update_stats(hw);
988 
989 	/* Reset software totals */
990 	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
991 
992 	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
993 }
994 
995 static int
996 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
997 			 struct rte_eth_xstat_name *xstats_names,
998 			 unsigned int size)
999 {
1000 	unsigned int i;
1001 
1002 	if (!xstats_names)
1003 		return RTE_DIM(atl_xstats_tbl);
1004 
1005 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
1006 		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
1007 			RTE_ETH_XSTATS_NAME_SIZE);
1008 
1009 	return i;
1010 }
1011 
1012 static int
1013 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1014 		   unsigned int n)
1015 {
1016 	struct atl_adapter *adapter =
1017 	(struct atl_adapter *)dev->data->dev_private;
1018 	struct aq_hw_s *hw = &adapter->hw;
1019 	struct get_stats req = { 0 };
1020 	struct macsec_msg_fw_request msg = { 0 };
1021 	struct macsec_msg_fw_response resp = { 0 };
1022 	int err = -1;
1023 	unsigned int i;
1024 
1025 	if (!stats)
1026 		return 0;
1027 
1028 	if (hw->aq_fw_ops->send_macsec_req != NULL) {
1029 		req.ingress_sa_index = 0xff;
1030 		req.egress_sc_index = 0xff;
1031 		req.egress_sa_index = 0xff;
1032 
1033 		msg.msg_type = macsec_get_stats_msg;
1034 		msg.stats = req;
1035 
1036 		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1037 	}
1038 
1039 	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
1040 		stats[i].id = i;
1041 
1042 		switch (atl_xstats_tbl[i].type) {
1043 		case XSTATS_TYPE_MSM:
1044 			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
1045 					 atl_xstats_tbl[i].offset);
1046 			break;
1047 		case XSTATS_TYPE_MACSEC:
1048 			if (err)
1049 				goto done;
1050 			stats[i].value = *(u64 *)((uint8_t *)&resp.stats +
1051 					 atl_xstats_tbl[i].offset);
1052 			break;
1053 		}
1054 	}
1055 done:
1056 	return i;
1057 }
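
/*
 * Usage sketch (hypothetical application code): the standard ethdev
 * two-step pattern applies; passing NULL names returns the table size,
 * which here is RTE_DIM(atl_xstats_tbl) (MSM plus MACsec counters):
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *	struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, vals, n);
 */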
1058 
1059 static int
1060 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1061 {
1062 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1063 	uint32_t fw_ver = 0;
1064 	unsigned int ret = 0;
1065 
1066 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1067 	if (ret)
1068 		return -EIO;
1069 
1070 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1071 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1072 
	ret += 1; /* account for the terminating null byte */
1074 
1075 	if (fw_size < ret)
1076 		return ret;
1077 
1078 	return 0;
1079 }
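
/*
 * Encoding note (illustrative): the 32-bit firmware version packs the
 * major number in bits 31:24, the minor in bits 23:16 and the build in
 * bits 15:0, so e.g. fw_ver = 0x0301001a is rendered as "3.1.26".
 */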
1080 
1081 static void
1082 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1083 {
1084 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1085 
1086 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1087 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1088 
1089 	dev_info->min_rx_bufsize = 1024;
1090 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1091 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1092 	dev_info->max_vfs = pci_dev->max_vfs;
1093 
1094 	dev_info->max_hash_mac_addrs = 0;
1095 	dev_info->max_vmdq_pools = 0;
1096 	dev_info->vmdq_queue_num = 0;
1097 
1098 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1099 
1100 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1101 
1103 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1104 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1105 	};
1106 
1107 	dev_info->default_txconf = (struct rte_eth_txconf) {
1108 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1109 	};
1110 
1111 	dev_info->rx_desc_lim = rx_desc_lim;
1112 	dev_info->tx_desc_lim = tx_desc_lim;
1113 
1114 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1115 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1116 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1117 
1118 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1119 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1120 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1121 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
1122 }
1123 
1124 static const uint32_t *
1125 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1126 {
1127 	static const uint32_t ptypes[] = {
1128 		RTE_PTYPE_L2_ETHER,
1129 		RTE_PTYPE_L2_ETHER_ARP,
1130 		RTE_PTYPE_L2_ETHER_VLAN,
1131 		RTE_PTYPE_L3_IPV4,
1132 		RTE_PTYPE_L3_IPV6,
1133 		RTE_PTYPE_L4_TCP,
1134 		RTE_PTYPE_L4_UDP,
1135 		RTE_PTYPE_L4_SCTP,
1136 		RTE_PTYPE_L4_ICMP,
1137 		RTE_PTYPE_UNKNOWN
1138 	};
1139 
1140 	if (dev->rx_pkt_burst == atl_recv_pkts)
1141 		return ptypes;
1142 
1143 	return NULL;
1144 }
1145 
1146 static void
1147 atl_dev_delayed_handler(void *param)
1148 {
1149 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1150 
1151 	atl_dev_configure_macsec(dev);
1152 }
1153 
/* Return 0 if the link status changed, -1 if it did not change */
1156 static int
1157 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
1158 {
1159 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1160 	struct rte_eth_link link, old;
1161 	int err = 0;
1162 
1163 	link.link_status = ETH_LINK_DOWN;
1164 	link.link_speed = 0;
1165 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1166 	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1167 	memset(&old, 0, sizeof(old));
1168 
1169 	/* load old link status */
1170 	rte_eth_linkstatus_get(dev, &old);
1171 
1172 	/* read current link status */
1173 	err = hw->aq_fw_ops->update_link_status(hw);
1174 
1175 	if (err)
1176 		return 0;
1177 
1178 	if (hw->aq_link_status.mbps == 0) {
1179 		/* write default (down) link status */
1180 		rte_eth_linkstatus_set(dev, &link);
1181 		if (link.link_status == old.link_status)
1182 			return -1;
1183 		return 0;
1184 	}
1185 
1186 	link.link_status = ETH_LINK_UP;
1187 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1188 	link.link_speed = hw->aq_link_status.mbps;
1189 
1190 	rte_eth_linkstatus_set(dev, &link);
1191 
1192 	if (link.link_status == old.link_status)
1193 		return -1;
1194 
1195 	if (rte_eal_alarm_set(1000 * 1000,
1196 			      atl_dev_delayed_handler, (void *)dev) < 0)
1197 		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");
1198 
1199 	return 0;
1200 }
1201 
1202 static void
1203 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1204 {
1205 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1206 
1207 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1208 }
1209 
1210 static void
1211 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1212 {
1213 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1214 
1215 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1216 }
1217 
1218 static void
1219 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1220 {
1221 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1222 
1223 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1224 }
1225 
1226 static void
1227 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1228 {
1229 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1230 
1231 	if (dev->data->promiscuous == 1)
1232 		return; /* must remain in all_multicast mode */
1233 
1234 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1235 }
1236 
1237 /**
 * It sets up the link status change (LSC) interrupt.
 * It is called only once, during NIC initialization.
1240  *
1241  * @param dev
1242  *  Pointer to struct rte_eth_dev.
1243  * @param on
1244  *  Enable or Disable.
1245  *
1246  * @return
1247  *  - On success, zero.
1248  *  - On failure, a negative value.
1249  */
1251 static int
1252 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
1253 {
1254 	atl_dev_link_status_print(dev);
1255 	return 0;
1256 }
1257 
1258 static int
1259 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
1260 {
1261 	return 0;
1262 }
1263 
1265 static int
1266 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1267 {
1268 	struct atl_interrupt *intr =
1269 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1270 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1271 	u64 cause = 0;
1272 
1273 	hw_atl_b0_hw_irq_read(hw, &cause);
1274 
1275 	atl_disable_intr(hw);
1276 
1277 	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
1278 		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
1279 
1280 	return 0;
1281 }
1282 
1283 /**
 * It reads the current link status and prints it.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
1292  */
1293 static void
1294 atl_dev_link_status_print(struct rte_eth_dev *dev)
1295 {
1296 	struct rte_eth_link link;
1297 
1298 	memset(&link, 0, sizeof(link));
1299 	rte_eth_linkstatus_get(dev, &link);
1300 	if (link.link_status) {
1301 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1302 					(int)(dev->data->port_id),
1303 					(unsigned int)link.link_speed,
1304 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1305 					"full-duplex" : "half-duplex");
1306 	} else {
1307 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
1308 				(int)(dev->data->port_id));
1309 	}
1310 
1312 #ifdef DEBUG
1313 {
1314 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1315 
1316 	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1317 				pci_dev->addr.domain,
1318 				pci_dev->addr.bus,
1319 				pci_dev->addr.devid,
1320 				pci_dev->addr.function);
1321 }
1322 #endif
1323 
1324 	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1325 }
1326 
1327 /*
 * It executes link_update after an interrupt has occurred.
1329  *
1330  * @param dev
1331  *  Pointer to struct rte_eth_dev.
1332  *
1333  * @return
1334  *  - On success, zero.
1335  *  - On failure, a negative value.
1336  */
1337 static int
1338 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1339 			   struct rte_intr_handle *intr_handle)
1340 {
1341 	struct atl_interrupt *intr =
1342 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1343 	struct atl_adapter *adapter =
1344 		(struct atl_adapter *)dev->data->dev_private;
1345 	struct aq_hw_s *hw = &adapter->hw;
1346 
1347 	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
1348 		goto done;
1349 
1350 	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1351 
1352 	/* Notify userapp if link status changed */
1353 	if (!atl_dev_link_update(dev, 0)) {
1354 		atl_dev_link_status_print(dev);
1355 		_rte_eth_dev_callback_process(dev,
1356 			RTE_ETH_EVENT_INTR_LSC, NULL);
1357 	} else {
1358 		if (hw->aq_fw_ops->send_macsec_req == NULL)
1359 			goto done;
1360 
1361 		/* Check macsec Keys expired */
1362 		struct get_stats req = { 0 };
1363 		struct macsec_msg_fw_request msg = { 0 };
1364 		struct macsec_msg_fw_response resp = { 0 };
1365 
1366 		req.ingress_sa_index = 0x0;
1367 		req.egress_sc_index = 0x0;
1368 		req.egress_sa_index = 0x0;
1369 		msg.msg_type = macsec_get_stats_msg;
1370 		msg.stats = req;
1371 
1372 		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1373 		if (err) {
1374 			PMD_DRV_LOG(ERR, "send_macsec_req fail");
1375 			goto done;
1376 		}
1377 		if (resp.stats.egress_threshold_expired ||
1378 		    resp.stats.ingress_threshold_expired ||
1379 		    resp.stats.egress_expired ||
1380 		    resp.stats.ingress_expired) {
1381 			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
1382 			_rte_eth_dev_callback_process(dev,
1383 				RTE_ETH_EVENT_MACSEC, NULL);
1384 		}
1385 	}
1386 done:
1387 	atl_enable_intr(dev);
1388 	rte_intr_enable(intr_handle);
1389 
1390 	return 0;
1391 }
1392 
1393 /**
 * Interrupt handler triggered by the NIC for handling
 * a specific interrupt.
1396  *
1397  * @param handle
1398  *  Pointer to interrupt handle.
1399  * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
1401  *
1402  * @return
1403  *  void
1404  */
1405 static void
1406 atl_dev_interrupt_handler(void *param)
1407 {
1408 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1409 
1410 	atl_dev_interrupt_get_status(dev);
1411 	atl_dev_interrupt_action(dev, dev->intr_handle);
1412 }
1413 
1414 #define SFP_EEPROM_SIZE 0xff
1415 
1416 static int
1417 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1418 {
1419 	return SFP_EEPROM_SIZE;
1420 }
1421 
1422 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1423 		       struct rte_dev_eeprom_info *eeprom)
1424 {
1425 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1426 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1427 
1428 	if (hw->aq_fw_ops->get_eeprom == NULL)
1429 		return -ENOTSUP;
1430 
1431 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1432 	    eeprom->data == NULL)
1433 		return -EINVAL;
1434 
1435 	if (eeprom->magic)
1436 		dev_addr = eeprom->magic;
1437 
1438 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1439 					 eeprom->length, eeprom->offset);
1440 }
1441 
1442 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1443 		       struct rte_dev_eeprom_info *eeprom)
1444 {
1445 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1446 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1447 
1448 	if (hw->aq_fw_ops->set_eeprom == NULL)
1449 		return -ENOTSUP;
1450 
1451 	if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1452 		return -EINVAL;
1453 
1454 	if (eeprom->magic)
1455 		dev_addr = eeprom->magic;
1456 
1457 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr,
1458 					 eeprom->data, eeprom->length);
1459 }
1460 
1461 static int
1462 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1463 {
1464 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1465 	u32 mif_id;
1466 	int err;
1467 
1468 	if (regs->data == NULL) {
1469 		regs->length = hw_atl_utils_hw_get_reg_length();
1470 		regs->width = sizeof(u32);
1471 		return 0;
1472 	}
1473 
1474 	/* Only full register dump is supported */
1475 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1476 		return -ENOTSUP;
1477 
1478 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1479 
1480 	/* Device version */
1481 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1482 	regs->version = mif_id & 0xFFU;
1483 
1484 	return err;
1485 }
1486 
1487 static int
1488 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1489 {
1490 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1491 
1492 	if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1493 		fc_conf->mode = RTE_FC_NONE;
	else if ((hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
		 == (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
		fc_conf->mode = RTE_FC_FULL;
	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_FC_TX_PAUSE;
1500 
1501 	return 0;
1502 }
1503 
1504 static int
1505 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1506 {
1507 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1508 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1509 
1511 	if (hw->aq_fw_ops->set_flow_control == NULL)
1512 		return -ENOTSUP;
1513 
1514 	if (fc_conf->mode == RTE_FC_NONE)
1515 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1516 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1517 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1518 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1519 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1520 	else if (fc_conf->mode == RTE_FC_FULL)
1521 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1522 
1523 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1524 		return hw->aq_fw_ops->set_flow_control(hw);
1525 
1526 	return 0;
1527 }
1528 
1529 static int
1530 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1531 		    u8 *mac_addr, bool enable)
1532 {
1533 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1534 	unsigned int h = 0U;
1535 	unsigned int l = 0U;
1536 	int err;
1537 
1538 	if (mac_addr) {
1539 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1540 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1541 			(mac_addr[4] << 8) | mac_addr[5];
1542 	}
1543 
1544 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1545 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1546 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1547 
1548 	if (enable)
1549 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1550 
1551 	err = aq_hw_err_from_flags(hw);
1552 
1553 	return err;
1554 }
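
/*
 * Illustrative: the L2 filter registers split a MAC address into a
 * 16-bit MSW and a 32-bit LSW; e.g. 00:11:22:33:44:55 is programmed
 * as h = 0x0011 and l = 0x22334455.
 */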
1555 
1556 static int
1557 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1558 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1559 {
1560 	if (is_zero_ether_addr(mac_addr)) {
1561 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1562 		return -EINVAL;
1563 	}
1564 
1565 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1566 }
1567 
1568 static void
1569 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1570 {
1571 	atl_update_mac_addr(dev, index, NULL, false);
1572 }
1573 
1574 static int
1575 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
1576 {
1577 	atl_remove_mac_addr(dev, 0);
1578 	atl_add_mac_addr(dev, addr, 0, 0);
1579 	return 0;
1580 }
1581 
1582 static int
1583 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1584 {
1585 	struct rte_eth_dev_info dev_info;
1586 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1587 
1588 	atl_dev_info_get(dev, &dev_info);
1589 
1590 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1591 		return -EINVAL;
1592 
1593 	/* update max frame size */
1594 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1595 
1596 	return 0;
1597 }
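
/*
 * Worked example (illustrative): a request for MTU 9000 gives
 * frame_size = 9000 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 9018,
 * which is accepted only if it does not exceed the max_rx_pktlen
 * (HW_ATL_B0_MTU_JUMBO) reported by atl_dev_info_get().
 */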
1598 
1599 static int
1600 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1601 {
1602 	struct aq_hw_cfg_s *cfg =
1603 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1604 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1605 	int err = 0;
1606 	int i = 0;
1607 
1608 	PMD_INIT_FUNC_TRACE();
1609 
1610 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1611 		if (cfg->vlan_filter[i] == vlan_id) {
1612 			if (!on) {
1613 				/* Disable VLAN filter. */
1614 				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1615 
1616 				/* Clear VLAN filter entry */
1617 				cfg->vlan_filter[i] = 0;
1618 			}
1619 			break;
1620 		}
1621 	}
1622 
	/* VLAN_ID was not found, so there is nothing to delete. */
1624 	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1625 		goto exit;
1626 
	/* VLAN_ID already exists, or was just removed above. Nothing to do. */
1628 	if (i != HW_ATL_B0_MAX_VLAN_IDS)
1629 		goto exit;
1630 
	/* Try to find a free VLAN filter entry for the new VLAN_ID */
1632 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1633 		if (cfg->vlan_filter[i] == 0)
1634 			break;
1635 	}
1636 
1637 	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter entry left for the new VLAN_ID */
1639 		err = -ENOMEM;
1640 		goto exit;
1641 	}
1642 
1643 	cfg->vlan_filter[i] = vlan_id;
1644 	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1645 	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1646 	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1647 
1648 exit:
	/* Enable VLAN promiscuous mode if the VLAN filter table is empty */
1650 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1651 		if (cfg->vlan_filter[i] != 0)
1652 			break;
1653 	}
1654 
1655 	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1656 
1657 	return err;
1658 }
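
/*
 * Usage sketch (hypothetical application code): VLAN filters are managed
 * through the generic ethdev API, which lands in the handler above; at
 * most HW_ATL_B0_MAX_VLAN_IDS filters can be active at once:
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);	// add VLAN 100
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);	// remove it again
 */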
1659 
1660 static int
1661 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1662 {
1663 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1664 	struct aq_hw_cfg_s *cfg =
1665 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1666 	int i;
1667 
1668 	PMD_INIT_FUNC_TRACE();
1669 
1670 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1671 		if (cfg->vlan_filter[i])
1672 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1673 	}
1674 	return 0;
1675 }
1676 
1677 static int
1678 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1679 {
1680 	struct aq_hw_cfg_s *cfg =
1681 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1682 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1683 	int ret = 0;
1684 	int i;
1685 
1686 	PMD_INIT_FUNC_TRACE();
1687 
	ret = atl_enable_vlan_filter(dev, !!(mask & ETH_VLAN_FILTER_MASK));
1689 
1690 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1691 
1692 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1693 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1694 
1695 	if (mask & ETH_VLAN_EXTEND_MASK)
1696 		ret = -ENOTSUP;
1697 
1698 	return ret;
1699 }
1700 
1701 static int
1702 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1703 		  uint16_t tpid)
1704 {
1705 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1706 	int err = 0;
1707 
1708 	PMD_INIT_FUNC_TRACE();
1709 
1710 	switch (vlan_type) {
1711 	case ETH_VLAN_TYPE_INNER:
1712 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1713 		break;
1714 	case ETH_VLAN_TYPE_OUTER:
1715 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1716 		break;
1717 	default:
1718 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1719 		err = -ENOTSUP;
1720 	}
1721 
1722 	return err;
1723 }
1724 
1725 static void
1726 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1727 {
1728 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1729 
1730 	PMD_INIT_FUNC_TRACE();
1731 
	if (queue_id >= dev->data->nb_rx_queues) {
1733 		PMD_DRV_LOG(ERR, "Invalid queue id");
1734 		return;
1735 	}
1736 
1737 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1738 }
1739 
1740 static int
1741 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1742 			  struct ether_addr *mc_addr_set,
1743 			  uint32_t nb_mc_addr)
1744 {
1745 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1746 	u32 i;
1747 
1748 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1749 		return -EINVAL;
1750 
	/* Update the whole unicast filter table */
1752 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1753 		u8 *mac_addr = NULL;
1754 		u32 l = 0, h = 0;
1755 
1756 		if (i < nb_mc_addr) {
1757 			mac_addr = mc_addr_set[i].addr_bytes;
1758 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1759 				(mac_addr[4] << 8) | mac_addr[5];
1760 			h = (mac_addr[0] << 8) | mac_addr[1];
1761 		}
1762 
1763 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1764 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1765 							HW_ATL_B0_MAC_MIN + i);
1766 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1767 							HW_ATL_B0_MAC_MIN + i);
1768 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1769 					   HW_ATL_B0_MAC_MIN + i);
1770 	}
1771 
1772 	return 0;
1773 }
1774 
1775 static int
1776 atl_reta_update(struct rte_eth_dev *dev,
1777 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1778 		   uint16_t reta_size)
1779 {
1780 	int i;
1781 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1782 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1783 
1784 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1785 		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1786 					dev->data->nb_rx_queues - 1);
1787 
1788 	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1789 	return 0;
1790 }
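
/*
 * Illustrative: with 4 RX queues, an application spreading traffic
 * round-robin would fill the redirection table with i % 4. Each entry
 * is clamped above to nb_rx_queues - 1 before being programmed, so an
 * out-of-range queue index can never reach the hardware.
 */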
1791 
1792 static int
1793 atl_reta_query(struct rte_eth_dev *dev,
1794 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1795 		    uint16_t reta_size)
1796 {
1797 	int i;
1798 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1799 
1800 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1801 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1802 	reta_conf->mask = ~0U;
1803 	return 0;
1804 }
1805 
1806 static int
1807 atl_rss_hash_update(struct rte_eth_dev *dev,
1808 				 struct rte_eth_rss_conf *rss_conf)
1809 {
1810 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1811 	struct aq_hw_cfg_s *cfg =
1812 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1813 	static u8 def_rss_key[40] = {
1814 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1815 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1816 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1817 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1818 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1819 	};
1820 
1821 	cfg->is_rss = !!rss_conf->rss_hf;
1822 	if (rss_conf->rss_key) {
1823 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1824 		       rss_conf->rss_key_len);
1825 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1826 	} else {
1827 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1828 		       sizeof(def_rss_key));
1829 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1830 	}
1831 
1832 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1833 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1834 	return 0;
1835 }
1836 
1837 static int
1838 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1839 				 struct rte_eth_rss_conf *rss_conf)
1840 {
1841 	struct aq_hw_cfg_s *cfg =
1842 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1843 
1844 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1845 	if (rss_conf->rss_key) {
1846 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1847 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1848 		       rss_conf->rss_key_len);
1849 	}
1850 
1851 	return 0;
1852 }
1853 
1854 static bool
1855 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1856 {
1857 	if (strcmp(dev->device->driver->name, drv->driver.name))
1858 		return false;
1859 
1860 	return true;
1861 }
1862 
1863 bool
1864 is_atlantic_supported(struct rte_eth_dev *dev)
1865 {
1866 	return is_device_supported(dev, &rte_atl_pmd);
1867 }
1868 
1869 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1870 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1871 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1872 
1873 RTE_INIT(atl_init_log)
1874 {
1875 	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1876 	if (atl_logtype_init >= 0)
1877 		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1878 	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1879 	if (atl_logtype_driver >= 0)
1880 		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
1881 }
1882