/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_string_fns.h>
#include <rte_ethdev_pci.h>
#include <rte_alarm.h>

#include "atl_ethdev.h"
#include "atl_common.h"
#include "atl_hw_regs.h"
#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);

static int  atl_dev_configure(struct rte_eth_dev *dev);
static int  atl_dev_start(struct rte_eth_dev *dev);
static void atl_dev_stop(struct rte_eth_dev *dev);
static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
static void atl_dev_close(struct rte_eth_dev *dev);
static int  atl_dev_reset(struct rte_eth_dev *dev);
static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);

static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);

static void atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);

static void atl_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN stuff */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
				     uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);

/* Regs */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);


static int atl_add_mac_addr(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
					   struct rte_ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);


static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

int atl_logtype_init;
int atl_logtype_driver;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};

#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

#define SFP_EEPROM_SIZE 0x100

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};

enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};

#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};
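
/*
 * Each table entry pairs an xstat name with the byte offset of its counter
 * in either struct aq_stats_s (MSM counters) or struct macsec_stats
 * (MACsec counters). For illustration, ATL_XSTATS_FIELD(uprc) expands
 * roughly to:
 *
 *	{ "uprc", offsetof(struct aq_stats_s, uprc), XSTATS_TYPE_MSM }
 *
 * atl_dev_xstats_get() later adds the recorded offset to the base of the
 * matching stats structure to fetch each counter value.
 */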

static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};

static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
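
/*
 * Note: the queue-control and datapath callbacks referenced above
 * (atl_rx_queue_setup(), atl_recv_pkts(), atl_xmit_pkts(), ...) are not
 * defined in this file; they live with the driver's RX/TX path code
 * (atl_rxtx.c in this driver).
 */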

static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}

static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}

static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}

static int
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
{
	struct atl_adapter *adapter =
		(struct atl_adapter *)eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;
	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration: hardcoded defaults */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M;

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
		return -ENOMEM;
	}

	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err)
		return err;

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)
		return -EINVAL;

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	atl_enable_intr(eth_dev);

	return err;
}

static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}

static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}

static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
atl_dev_start(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int status;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fix speed not supported",
				dev->data->port_id);
		return -EINVAL;
	}

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = atl_reset_hw(hw);
	if (status != 0)
		return -EIO;

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
	if (err)
		return err;

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					ATL_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
			return -1;
		}
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* initialize transmission unit */
	atl_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}
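
	/*
	 * fw_ver_actual packs the firmware version as consumed by the
	 * format string below: bits 31:24 major, 23:16 minor, 15:0 build.
	 */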
	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		hw->fw_ver_actual >> 24,
		(hw->fw_ver_actual >> 16) & 0xFF,
		hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = atl_dev_set_link_up(dev);
	if (err)
		goto error;

	err = hw->aq_fw_ops->update_link_status(hw);
	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
		else
			atl_dev_lsc_interrupt_setup(dev, false);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* re-enable interrupts, since the HW reset cleared them */
	atl_enable_intr(dev);

	return 0;

error:
	atl_stop_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* restore the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

/*
 * Set device link up: enable tx.
 */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;
	} else {
		if (link_speeds & ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
		if (link_speeds & ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
		if (link_speeds & ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
		if (link_speeds & ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
		if (link_speeds & ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;
	}

	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}

/*
 * Set device link down: disable tx.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}

/*
 * Reset and stop device.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}

static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_atl_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}

static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Create the SC/SA structures from the parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* a single TX SC seems to be supported */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */

	/*
	 * Creating SCI (Secure Channel Identifier).
	 * The SCI is constructed from the source MAC and a port identifier.
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0;

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}

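/*
 * MACsec configuration helpers (exposed through the PMD-specific API).
 * They only record the requested settings in the adapter's aq_hw_cfg_s;
 * nothing is written to hardware here. The accumulated configuration is
 * pushed to the firmware by atl_dev_configure_macsec(), which runs from
 * the delayed alarm handler scheduled on a link state change.
 */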
int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

	return 0;
}

int atl_macsec_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

	return 0;
}

int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, ETHER_ADDR_LEN);

	return 0;
}

int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}

int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}

int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}

static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}

static void
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
}

static int
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned int size)
{
	unsigned int i;

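	/*
	 * ethdev convention: a NULL xstats_names array means "report how
	 * many xstats exist" rather than filling in any names.
	 */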
	if (!xstats_names)
		return RTE_DIM(atl_xstats_tbl);

	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
			RTE_ETH_XSTATS_NAME_SIZE);

	return i;
}

static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;

	if (!stats)
		return 0;

	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (err)
				goto done;
			stats[i].value = *(u64 *)((uint8_t *)&resp.stats +
					 atl_xstats_tbl[i].offset);
			break;
		}
	}
done:
	return i;
}

static int
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
	unsigned int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	if (ret)
		return -EIO;

	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);

	ret += 1; /* add string null-terminator */

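	/*
	 * ethdev convention: when the caller's buffer is too small, return
	 * the number of bytes needed (including the NUL terminator).
	 */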
	if (fw_size < ret)
		return ret;

	return 0;
}

static void
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
}

static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == atl_recv_pkts)
		return ptypes;

	return NULL;
}

static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);
}

/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}

static void
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
}

static void
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
}

static void
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
}

static void
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once, during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */

static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}

static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}

#ifdef DEBUG
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
#endif

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
}

/*
 * Executes link_update once an interrupt has occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check whether any MACsec keys have expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req fail");
			goto done;
		}
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			_rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	atl_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}

/**
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
atl_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}

static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}

int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

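	/*
	 * eeprom->magic optionally selects the SMBus device address of the
	 * module to access (valid values are 0x01..0x7F); zero means the
	 * default SMBUS_DEVICE_ID.
	 */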
	if (hw->aq_fw_ops->get_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->set_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}

static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fc = AQ_NIC_FC_OFF;

	if (hw->aq_fw_ops->get_flow_control == NULL)
		return -ENOTSUP;

	hw->aq_fw_ops->get_flow_control(hw, &fc);

	if (fc == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_FC_NONE;
	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
		fc_conf->mode = RTE_FC_FULL;
	else if (fc & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (fc & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_FC_TX_PAUSE;

	return 0;
}

static int
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;

	if (hw->aq_fw_ops->set_flow_control == NULL)
		return -ENOTSUP;

	if (fc_conf->mode == RTE_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);

	return 0;
}

static int
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err;

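	/*
	 * Pack the six MAC bytes into the two filter registers: h (MSW)
	 * holds bytes 0..1 and l (LSW) bytes 2..5. For example,
	 * 00:11:22:33:44:55 yields h = 0x0011, l = 0x22334455.
	 */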
	if (mac_addr) {
		h = (mac_addr[0] << 8) | (mac_addr[1]);
		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			(mac_addr[4] << 8) | mac_addr[5];
	}

	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	if (enable)
		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

	return err;
}

static int
atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
			uint32_t index __rte_unused, uint32_t pool __rte_unused)
{
	if (is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
}

static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}

static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
	return 0;
}

static int
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
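	/* e.g. MTU 1500 -> 1518-byte frame (14B Ethernet header + 4B CRC) */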

	atl_dev_info_get(dev, &dev_info);

	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}

static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

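	/*
	 * Filter table handling below: scan for vlan_id; on delete, disable
	 * and clear the matching entry; on add, claim the first free slot.
	 * VLAN promiscuous mode is enabled whenever the table ends up
	 * empty, so traffic is not filtered in that case.
	 */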
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found, so there is nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exists, or was removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter entry for the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter entry to add the new VLAN_ID */
		err = -ENOMEM;
		goto exit;
	}

	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promiscuous mode if the VLAN filter table is empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}

static int
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
	}
	return 0;
}

static int
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	int i;

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	if (mask & ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
}

static int
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
		break;
	case ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
		err = -ENOTSUP;
	}

	return err;
}

static void
atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue id");
		return;
	}

	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
}

static int
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 i;

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
		return -EINVAL;

	/* Update the whole UC filter table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;
		u32 l = 0, h = 0;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];
		}

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
	}

	return 0;
}

static int
atl_reta_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

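	/*
	 * Each entry is clamped to a valid RX queue index. This assumes the
	 * whole table fits in the first rte_eth_rss_reta_entry64 group
	 * (i.e. an indirection table of at most 64 entries).
	 */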
	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}

static int
atl_reta_query(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}

static int
atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
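	/*
	 * Default 40-byte RSS hash key, used when the application does not
	 * provide one (40 bytes is the usual Toeplitz key length).
	 */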
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
	} else {
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
	}

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
	return 0;
}

static int
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
	}

	return 0;
}

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}

RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");

RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}