xref: /dpdk/drivers/net/atlantic/atl_ethdev.c (revision b8f5d2ae75c97698190d46f4810d01f407016aad)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_alarm.h>
8 
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
12 #include "atl_logs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
16 
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int  atl_dev_configure(struct rte_eth_dev *dev);
19 static int  atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static int  atl_dev_close(struct rte_eth_dev *dev);
24 static int  atl_dev_reset(struct rte_eth_dev *dev);
25 static int  atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static int  atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
30 
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 				    struct rte_eth_xstat_name *xstats_names,
33 				    unsigned int size);
34 
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 				struct rte_eth_stats *stats);
37 
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 			      struct rte_eth_xstat *stats, unsigned int n);
40 
41 static int atl_dev_stats_reset(struct rte_eth_dev *dev);
42 
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
44 			      size_t fw_size);
45 
46 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
47 
48 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
49 
50 /* VLAN stuff */
51 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
52 		uint16_t vlan_id, int on);
53 
54 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
55 
56 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
57 				     uint16_t queue_id, int on);
58 
59 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
60 			     enum rte_vlan_type vlan_type, uint16_t tpid);
61 
62 /* EEPROM */
63 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
64 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
65 			      struct rte_dev_eeprom_info *eeprom);
66 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
67 			      struct rte_dev_eeprom_info *eeprom);
68 
69 /* Regs */
70 static int atl_dev_get_regs(struct rte_eth_dev *dev,
71 			    struct rte_dev_reg_info *regs);
72 
73 /* Flow control */
74 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
75 			       struct rte_eth_fc_conf *fc_conf);
76 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
77 			       struct rte_eth_fc_conf *fc_conf);
78 
79 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
80 
81 /* Interrupts */
82 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
83 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
84 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
85 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
86 				    struct rte_intr_handle *handle);
87 static void atl_dev_interrupt_handler(void *param);
88 
89 
90 static int atl_add_mac_addr(struct rte_eth_dev *dev,
91 			    struct rte_ether_addr *mac_addr,
92 			    uint32_t index, uint32_t pool);
93 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
94 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
95 					   struct rte_ether_addr *mac_addr);
96 
97 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
98 				    struct rte_ether_addr *mc_addr_set,
99 				    uint32_t nb_mc_addr);
100 
101 /* RSS */
102 static int atl_reta_update(struct rte_eth_dev *dev,
103 			     struct rte_eth_rss_reta_entry64 *reta_conf,
104 			     uint16_t reta_size);
105 static int atl_reta_query(struct rte_eth_dev *dev,
106 			    struct rte_eth_rss_reta_entry64 *reta_conf,
107 			    uint16_t reta_size);
108 static int atl_rss_hash_update(struct rte_eth_dev *dev,
109 				 struct rte_eth_rss_conf *rss_conf);
110 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
111 				   struct rte_eth_rss_conf *rss_conf);
112 
113 
114 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
115 	struct rte_pci_device *pci_dev);
116 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
117 
118 static int atl_dev_info_get(struct rte_eth_dev *dev,
119 				struct rte_eth_dev_info *dev_info);
120 
121 /*
122  * The set of PCI devices this driver supports
123  */
124 static const struct rte_pci_id pci_id_atl_map[] = {
125 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
126 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
127 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
128 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
129 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
130 
131 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
132 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
133 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
134 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
135 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
136 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
137 
138 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
139 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
140 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
141 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
142 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
143 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
144 
145 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
146 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
147 	{ .vendor_id = 0, /* sentinel */ },
148 };
149 
150 static struct rte_pci_driver rte_atl_pmd = {
151 	.id_table = pci_id_atl_map,
152 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
153 	.probe = eth_atl_pci_probe,
154 	.remove = eth_atl_pci_remove,
155 };
156 
157 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
158 			| DEV_RX_OFFLOAD_IPV4_CKSUM \
159 			| DEV_RX_OFFLOAD_UDP_CKSUM \
160 			| DEV_RX_OFFLOAD_TCP_CKSUM \
161 			| DEV_RX_OFFLOAD_JUMBO_FRAME \
162 			| DEV_RX_OFFLOAD_MACSEC_STRIP \
163 			| DEV_RX_OFFLOAD_VLAN_FILTER)
164 
165 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
166 			| DEV_TX_OFFLOAD_IPV4_CKSUM \
167 			| DEV_TX_OFFLOAD_UDP_CKSUM \
168 			| DEV_TX_OFFLOAD_TCP_CKSUM \
169 			| DEV_TX_OFFLOAD_TCP_TSO \
170 			| DEV_TX_OFFLOAD_MACSEC_INSERT \
171 			| DEV_TX_OFFLOAD_MULTI_SEGS)
172 
173 #define SFP_EEPROM_SIZE 0x100
174 
175 static const struct rte_eth_desc_lim rx_desc_lim = {
176 	.nb_max = ATL_MAX_RING_DESC,
177 	.nb_min = ATL_MIN_RING_DESC,
178 	.nb_align = ATL_RXD_ALIGN,
179 };
180 
181 static const struct rte_eth_desc_lim tx_desc_lim = {
182 	.nb_max = ATL_MAX_RING_DESC,
183 	.nb_min = ATL_MIN_RING_DESC,
184 	.nb_align = ATL_TXD_ALIGN,
185 	.nb_seg_max = ATL_TX_MAX_SEG,
186 	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
187 };
188 
189 enum atl_xstats_type {
190 	XSTATS_TYPE_MSM = 0,
191 	XSTATS_TYPE_MACSEC,
192 };
193 
194 #define ATL_XSTATS_FIELD(name) { \
195 	#name, \
196 	offsetof(struct aq_stats_s, name), \
197 	XSTATS_TYPE_MSM \
198 }
199 
200 #define ATL_MACSEC_XSTATS_FIELD(name) { \
201 	#name, \
202 	offsetof(struct macsec_stats, name), \
203 	XSTATS_TYPE_MACSEC \
204 }
205 
206 struct atl_xstats_tbl_s {
207 	const char *name;
208 	unsigned int offset;
209 	enum atl_xstats_type type;
210 };
211 
212 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
213 	ATL_XSTATS_FIELD(uprc),
214 	ATL_XSTATS_FIELD(mprc),
215 	ATL_XSTATS_FIELD(bprc),
216 	ATL_XSTATS_FIELD(erpt),
217 	ATL_XSTATS_FIELD(uptc),
218 	ATL_XSTATS_FIELD(mptc),
219 	ATL_XSTATS_FIELD(bptc),
220 	ATL_XSTATS_FIELD(erpr),
221 	ATL_XSTATS_FIELD(ubrc),
222 	ATL_XSTATS_FIELD(ubtc),
223 	ATL_XSTATS_FIELD(mbrc),
224 	ATL_XSTATS_FIELD(mbtc),
225 	ATL_XSTATS_FIELD(bbrc),
226 	ATL_XSTATS_FIELD(bbtc),
227 	/* Ingress Common Counters */
228 	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
229 	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
230 	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
231 	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
232 	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
233 	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
234 	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
235 	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
236 	/* Ingress SA Counters */
237 	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
238 	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
239 	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
240 	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
241 	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
242 	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
243 	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
244 	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
245 	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
246 	/* Egress Common Counters */
247 	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
248 	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
249 	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
250 	ATL_MACSEC_XSTATS_FIELD(out_too_long),
251 	/* Egress SC Counters */
252 	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
253 	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
254 	/* Egress SA Counters */
255 	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
256 	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
257 	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
258 	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
259 };
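/*
 * Editor's note: the MACsec entries above are only counted when the
 * firmware advertises MACsec support (CAPS_LO_MACSEC); see
 * atl_dev_xstats_get_count() below.
 */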
260 
261 static const struct eth_dev_ops atl_eth_dev_ops = {
262 	.dev_configure	      = atl_dev_configure,
263 	.dev_start	      = atl_dev_start,
264 	.dev_stop	      = atl_dev_stop,
265 	.dev_set_link_up      = atl_dev_set_link_up,
266 	.dev_set_link_down    = atl_dev_set_link_down,
267 	.dev_close	      = atl_dev_close,
268 	.dev_reset	      = atl_dev_reset,
269 
270 	/* PROMISC */
271 	.promiscuous_enable   = atl_dev_promiscuous_enable,
272 	.promiscuous_disable  = atl_dev_promiscuous_disable,
273 	.allmulticast_enable  = atl_dev_allmulticast_enable,
274 	.allmulticast_disable = atl_dev_allmulticast_disable,
275 
276 	/* Link */
277 	.link_update	      = atl_dev_link_update,
278 
279 	.get_reg              = atl_dev_get_regs,
280 
281 	/* Stats */
282 	.stats_get	      = atl_dev_stats_get,
283 	.xstats_get	      = atl_dev_xstats_get,
284 	.xstats_get_names     = atl_dev_xstats_get_names,
285 	.stats_reset	      = atl_dev_stats_reset,
286 	.xstats_reset	      = atl_dev_stats_reset,
287 
288 	.fw_version_get       = atl_fw_version_get,
289 	.dev_infos_get	      = atl_dev_info_get,
290 	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
291 
292 	.mtu_set              = atl_dev_mtu_set,
293 
294 	/* VLAN */
295 	.vlan_filter_set      = atl_vlan_filter_set,
296 	.vlan_offload_set     = atl_vlan_offload_set,
297 	.vlan_tpid_set        = atl_vlan_tpid_set,
298 	.vlan_strip_queue_set = atl_vlan_strip_queue_set,
299 
300 	/* Queue Control */
301 	.rx_queue_start	      = atl_rx_queue_start,
302 	.rx_queue_stop	      = atl_rx_queue_stop,
303 	.rx_queue_setup       = atl_rx_queue_setup,
304 	.rx_queue_release     = atl_rx_queue_release,
305 
306 	.tx_queue_start	      = atl_tx_queue_start,
307 	.tx_queue_stop	      = atl_tx_queue_stop,
308 	.tx_queue_setup       = atl_tx_queue_setup,
309 	.tx_queue_release     = atl_tx_queue_release,
310 
311 	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
312 	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
313 
314 	/* EEPROM */
315 	.get_eeprom_length    = atl_dev_get_eeprom_length,
316 	.get_eeprom           = atl_dev_get_eeprom,
317 	.set_eeprom           = atl_dev_set_eeprom,
318 
319 	/* Flow Control */
320 	.flow_ctrl_get	      = atl_flow_ctrl_get,
321 	.flow_ctrl_set	      = atl_flow_ctrl_set,
322 
323 	/* MAC */
324 	.mac_addr_add	      = atl_add_mac_addr,
325 	.mac_addr_remove      = atl_remove_mac_addr,
326 	.mac_addr_set	      = atl_set_default_mac_addr,
327 	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
328 	.rxq_info_get	      = atl_rxq_info_get,
329 	.txq_info_get	      = atl_txq_info_get,
330 
331 	.reta_update          = atl_reta_update,
332 	.reta_query           = atl_reta_query,
333 	.rss_hash_update      = atl_rss_hash_update,
334 	.rss_hash_conf_get    = atl_rss_hash_conf_get,
335 };
336 
337 static inline int32_t
338 atl_reset_hw(struct aq_hw_s *hw)
339 {
340 	return hw_atl_b0_hw_reset(hw);
341 }
342 
343 static inline void
344 atl_enable_intr(struct rte_eth_dev *dev)
345 {
346 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
347 
348 	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
349 }
350 
351 static void
352 atl_disable_intr(struct aq_hw_s *hw)
353 {
354 	PMD_INIT_FUNC_TRACE();
355 	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
356 }
357 
358 static int
359 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
360 {
361 	struct atl_adapter *adapter = eth_dev->data->dev_private;
362 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
363 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
364 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
365 	int err = 0;
366 
367 	PMD_INIT_FUNC_TRACE();
368 
369 	eth_dev->dev_ops = &atl_eth_dev_ops;
370 
371 	eth_dev->rx_queue_count       = atl_rx_queue_count;
372 	eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
373 	eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;
374 
375 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
376 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
377 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
378 
379 	/* For secondary processes, the primary process has done all the work */
380 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
381 		return 0;
382 
383 	/* Vendor and Device ID need to be set before init of shared code */
384 	hw->device_id = pci_dev->id.device_id;
385 	hw->vendor_id = pci_dev->id.vendor_id;
386 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
387 
388 	/* Hardware configuration - hardcoded defaults */
389 	adapter->hw_cfg.is_lro = false;
390 	adapter->hw_cfg.wol = false;
391 	adapter->hw_cfg.is_rss = false;
392 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
393 
394 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
395 			  AQ_NIC_RATE_5G |
396 			  AQ_NIC_RATE_2G5 |
397 			  AQ_NIC_RATE_1G |
398 			  AQ_NIC_RATE_100M;
399 
400 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
401 	adapter->hw_cfg.aq_rss.indirection_table_size =
402 		HW_ATL_B0_RSS_REDIRECTION_MAX;
403 
404 	hw->aq_nic_cfg = &adapter->hw_cfg;
405 
406 	pthread_mutex_init(&hw->mbox_mutex, NULL);
407 
408 	/* disable interrupt */
409 	atl_disable_intr(hw);
410 
411 	/* Allocate memory for storing MAC addresses */
412 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
413 					RTE_ETHER_ADDR_LEN, 0);
414 	if (eth_dev->data->mac_addrs == NULL) {
415 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
416 		return -ENOMEM;
417 	}
418 
419 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
420 	if (err)
421 		return err;
422 
423 	/* Copy the permanent MAC address */
424 	if (hw->aq_fw_ops->get_mac_permanent(hw,
425 			eth_dev->data->mac_addrs->addr_bytes) != 0)
426 		return -EINVAL;
427 
428 	/* Reset the hw statistics */
429 	atl_dev_stats_reset(eth_dev);
430 
431 	rte_intr_callback_register(intr_handle,
432 				   atl_dev_interrupt_handler, eth_dev);
433 
434 	/* enable uio/vfio intr/eventfd mapping */
435 	rte_intr_enable(intr_handle);
436 
437 	/* enable interrupts */
438 	atl_enable_intr(eth_dev);
439 
440 	return err;
441 }
442 
443 static int
444 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
445 	struct rte_pci_device *pci_dev)
446 {
447 	return rte_eth_dev_pci_generic_probe(pci_dev,
448 		sizeof(struct atl_adapter), eth_atl_dev_init);
449 }
450 
451 static int
452 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
453 {
454 	return rte_eth_dev_pci_generic_remove(pci_dev, atl_dev_close);
455 }
456 
457 static int
458 atl_dev_configure(struct rte_eth_dev *dev)
459 {
460 	struct atl_interrupt *intr =
461 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
462 
463 	PMD_INIT_FUNC_TRACE();
464 
465 	/* set flag to update link status after init */
466 	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
467 
468 	return 0;
469 }
470 
471 /*
472  * Configure device link speed and setup link.
473  * It returns 0 on success.
474  */
475 static int
476 atl_dev_start(struct rte_eth_dev *dev)
477 {
478 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
479 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
480 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
481 	uint32_t intr_vector = 0;
482 	int status;
483 	int err;
484 
485 	PMD_INIT_FUNC_TRACE();
486 
487 	/* set adapter started */
488 	hw->adapter_stopped = 0;
489 
490 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
491 		PMD_INIT_LOG(ERR,
492 		"Invalid link_speeds for port %u, fix speed not supported",
493 				dev->data->port_id);
494 		return -EINVAL;
495 	}
496 
497 	/* disable uio/vfio intr/eventfd mapping */
498 	rte_intr_disable(intr_handle);
499 
500 	/* Reinitialize the adapter;
501 	 * this performs a reset followed by a start.
502 	 */
503 	status = atl_reset_hw(hw);
504 	if (status != 0)
505 		return -EIO;
506 
507 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
508 
509 	hw_atl_b0_hw_start(hw);
510 	/* check and configure queue intr-vector mapping */
511 	if ((rte_intr_cap_multiple(intr_handle) ||
512 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
513 	    dev->data->dev_conf.intr_conf.rxq != 0) {
514 		intr_vector = dev->data->nb_rx_queues;
515 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
516 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
517 					ATL_MAX_INTR_QUEUE_NUM);
518 			return -ENOTSUP;
519 		}
520 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
521 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
522 			return -1;
523 		}
524 	}
525 
526 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
527 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
528 				    dev->data->nb_rx_queues * sizeof(int), 0);
529 		if (intr_handle->intr_vec == NULL) {
530 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
531 				     " intr_vec", dev->data->nb_rx_queues);
532 			return -ENOMEM;
533 		}
534 	}
535 
536 	/* initialize transmission unit */
537 	atl_tx_init(dev);
538 
539 	/* This can fail when allocating mbufs for descriptor rings */
540 	err = atl_rx_init(dev);
541 	if (err) {
542 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
543 		goto error;
544 	}
545 
546 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
547 		hw->fw_ver_actual >> 24,
548 		(hw->fw_ver_actual >> 16) & 0xFF,
549 		hw->fw_ver_actual & 0xFFFF);
550 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
551 
552 	err = atl_start_queues(dev);
553 	if (err < 0) {
554 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
555 		goto error;
556 	}
557 
558 	err = atl_dev_set_link_up(dev);
559 	if (!err)
560 		err = hw->aq_fw_ops->update_link_status(hw);
561 
562 	if (err)
563 		goto error;
564 
565 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
566 
567 	if (rte_intr_allow_others(intr_handle)) {
568 		/* check if lsc interrupt is enabled */
569 		if (dev->data->dev_conf.intr_conf.lsc != 0)
570 			atl_dev_lsc_interrupt_setup(dev, true);
571 		else
572 			atl_dev_lsc_interrupt_setup(dev, false);
573 	} else {
574 		rte_intr_callback_unregister(intr_handle,
575 					     atl_dev_interrupt_handler, dev);
576 		if (dev->data->dev_conf.intr_conf.lsc != 0)
577 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
578 				     " no intr multiplex");
579 	}
580 
581 	/* check if rxq interrupt is enabled */
582 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
583 	    rte_intr_dp_is_en(intr_handle))
584 		atl_dev_rxq_interrupt_setup(dev);
585 
586 	/* enable uio/vfio intr/eventfd mapping */
587 	rte_intr_enable(intr_handle);
588 
589 	/* re-enable interrupts, which were disabled by the HW reset */
590 	atl_enable_intr(dev);
591 
592 	return 0;
593 
594 error:
595 	atl_stop_queues(dev);
596 	return -EIO;
597 }
598 
599 /*
600  * Stop device: disable rx and tx functions to allow for reconfiguring.
601  */
602 static void
603 atl_dev_stop(struct rte_eth_dev *dev)
604 {
605 	struct rte_eth_link link;
606 	struct aq_hw_s *hw =
607 		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
608 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
609 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
610 
611 	PMD_INIT_FUNC_TRACE();
612 	dev->data->dev_started = 0;
613 
614 	/* disable interrupts */
615 	atl_disable_intr(hw);
616 
617 	/* reset the NIC */
618 	atl_reset_hw(hw);
619 	hw->adapter_stopped = 1;
620 
621 	atl_stop_queues(dev);
622 
623 	/* Clear stored conf */
624 	dev->data->scattered_rx = 0;
625 	dev->data->lro = 0;
626 
627 	/* Clear recorded link status */
628 	memset(&link, 0, sizeof(link));
629 	rte_eth_linkstatus_set(dev, &link);
630 
631 	if (!rte_intr_allow_others(intr_handle))
632 		/* restore the default handler */
633 		rte_intr_callback_register(intr_handle,
634 					   atl_dev_interrupt_handler,
635 					   (void *)dev);
636 
637 	/* Clean datapath event and queue/vec mapping */
638 	rte_intr_efd_disable(intr_handle);
639 	if (intr_handle->intr_vec != NULL) {
640 		rte_free(intr_handle->intr_vec);
641 		intr_handle->intr_vec = NULL;
642 	}
643 }
644 
645 /*
646  * Set device link up: enable tx.
647  */
648 static int
649 atl_dev_set_link_up(struct rte_eth_dev *dev)
650 {
651 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
652 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
653 	uint32_t speed_mask = 0;
654 
655 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
656 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
657 	} else {
658 		if (link_speeds & ETH_LINK_SPEED_10G)
659 			speed_mask |= AQ_NIC_RATE_10G;
660 		if (link_speeds & ETH_LINK_SPEED_5G)
661 			speed_mask |= AQ_NIC_RATE_5G;
662 		if (link_speeds & ETH_LINK_SPEED_1G)
663 			speed_mask |= AQ_NIC_RATE_1G;
664 		if (link_speeds & ETH_LINK_SPEED_2_5G)
665 			speed_mask |=  AQ_NIC_RATE_2G5;
666 		if (link_speeds & ETH_LINK_SPEED_100M)
667 			speed_mask |= AQ_NIC_RATE_100M;
668 	}
669 
670 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
671 }
672 
673 /*
674  * Set device link down: disable tx.
675  */
676 static int
677 atl_dev_set_link_down(struct rte_eth_dev *dev)
678 {
679 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
680 
681 	return hw->aq_fw_ops->set_link_speed(hw, 0);
682 }
683 
684 /*
685  * Reset and stop device.
686  */
687 static int
688 atl_dev_close(struct rte_eth_dev *dev)
689 {
690 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
691 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
692 	struct aq_hw_s *hw;
693 
694 	PMD_INIT_FUNC_TRACE();
695 
696 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
697 		return 0;
698 
699 	hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
700 
701 	atl_dev_stop(dev);
702 
703 	atl_free_queues(dev);
704 
705 	dev->dev_ops = NULL;
706 	dev->rx_pkt_burst = NULL;
707 	dev->tx_pkt_burst = NULL;
708 
709 	/* disable uio intr before callback unregister */
710 	rte_intr_disable(intr_handle);
711 	rte_intr_callback_unregister(intr_handle,
712 				     atl_dev_interrupt_handler, dev);
713 
714 	pthread_mutex_destroy(&hw->mbox_mutex);
715 
716 	return 0;
717 }
718 
719 static int
720 atl_dev_reset(struct rte_eth_dev *dev)
721 {
722 	int ret;
723 
724 	ret = atl_dev_close(dev);
725 	if (ret)
726 		return ret;
727 
728 	ret = eth_atl_dev_init(dev);
729 
730 	return ret;
731 }
732 
733 static int
734 atl_dev_configure_macsec(struct rte_eth_dev *dev)
735 {
736 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
737 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
738 	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
739 	struct macsec_msg_fw_request msg_macsec;
740 	struct macsec_msg_fw_response response;
741 
742 	if (!aqcfg->common.macsec_enabled ||
743 	    hw->aq_fw_ops->send_macsec_req == NULL)
744 		return 0;
745 
746 	memset(&msg_macsec, 0, sizeof(msg_macsec));
747 
748 	/* Create the set of SC/SA structures from the parameters provided by DPDK */
749 
750 	/* Configure macsec */
751 	msg_macsec.msg_type = macsec_cfg_msg;
752 	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
753 	msg_macsec.cfg.interrupts_enabled = 1;
754 
755 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
756 
757 	if (response.result)
758 		return -1;
759 
760 	memset(&msg_macsec, 0, sizeof(msg_macsec));
761 
762 	/* Configure TX SC */
763 
764 	msg_macsec.msg_type = macsec_add_tx_sc_msg;
765 	msg_macsec.txsc.index = 0; /* a single TX SC is assumed */
766 	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
767 
768 	/* MAC addr for TX */
769 	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
770 	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
771 	msg_macsec.txsc.sa_mask = 0x3f;
772 
773 	msg_macsec.txsc.da_mask = 0;
774 	msg_macsec.txsc.tci = 0x0B;
775 	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */
776 
777 	/*
778 	 * Create the SCI (Secure Channel Identifier),
779 	 * constructed from the source MAC and the port identifier.
780 	 */
781 	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
782 			       (msg_macsec.txsc.mac_sa[0] >> 16);
783 	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
784 
785 	uint32_t port_identifier = 1;
786 
787 	msg_macsec.txsc.sci[1] = sci_hi_part;
788 	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
789 
790 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
791 
792 	if (response.result)
793 		return -1;
794 
795 	memset(&msg_macsec, 0, sizeof(msg_macsec));
796 
797 	/* Configure RX SC */
798 
799 	msg_macsec.msg_type = macsec_add_rx_sc_msg;
800 	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
801 	msg_macsec.rxsc.replay_protect =
802 		aqcfg->common.replay_protection_enabled;
803 	msg_macsec.rxsc.anti_replay_window = 0;
804 
805 	/* MAC addr for RX */
806 	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
807 	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
808 	msg_macsec.rxsc.da_mask = 0; /* was 0x3f */
809 
810 	msg_macsec.rxsc.sa_mask = 0;
811 
812 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
813 
814 	if (response.result)
815 		return -1;
816 
817 	memset(&msg_macsec, 0, sizeof(msg_macsec));
818 
819 	/* Configure TX SA */
820 
821 	msg_macsec.msg_type = macsec_add_tx_sa_msg;
822 	msg_macsec.txsa.index = aqcfg->txsa.idx;
823 	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
824 
825 	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
826 	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
827 	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
828 	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
829 
830 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
831 
832 	if (response.result)
833 		return -1;
834 
835 	memset(&msg_macsec, 0, sizeof(msg_macsec));
836 
837 	/* Configure RX SA */
838 
839 	msg_macsec.msg_type = macsec_add_rx_sa_msg;
840 	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
841 	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
842 
843 	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
844 	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
845 	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
846 	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
847 
848 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
849 
850 	if (response.result)
851 		return -1;
852 
853 	return 0;
854 }
855 
856 int atl_macsec_enable(struct rte_eth_dev *dev,
857 		      uint8_t encr, uint8_t repl_prot)
858 {
859 	struct aq_hw_cfg_s *cfg =
860 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
861 
862 	cfg->aq_macsec.common.macsec_enabled = 1;
863 	cfg->aq_macsec.common.encryption_enabled = encr;
864 	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
865 
866 	return 0;
867 }
868 
869 int atl_macsec_disable(struct rte_eth_dev *dev)
870 {
871 	struct aq_hw_cfg_s *cfg =
872 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
873 
874 	cfg->aq_macsec.common.macsec_enabled = 0;
875 
876 	return 0;
877 }
878 
879 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
880 {
881 	struct aq_hw_cfg_s *cfg =
882 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
883 
884 	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
885 	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
886 		RTE_ETHER_ADDR_LEN);
887 
888 	return 0;
889 }
890 
891 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
892 			   uint8_t *mac, uint16_t pi)
893 {
894 	struct aq_hw_cfg_s *cfg =
895 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
896 
897 	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
898 	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
899 		RTE_ETHER_ADDR_LEN);
900 	cfg->aq_macsec.rxsc.pi = pi;
901 
902 	return 0;
903 }
904 
905 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
906 			   uint8_t idx, uint8_t an,
907 			   uint32_t pn, uint8_t *key)
908 {
909 	struct aq_hw_cfg_s *cfg =
910 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
911 
912 	cfg->aq_macsec.txsa.idx = idx;
913 	cfg->aq_macsec.txsa.pn = pn;
914 	cfg->aq_macsec.txsa.an = an;
915 
916 	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
917 	return 0;
918 }
919 
920 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
921 			   uint8_t idx, uint8_t an,
922 			   uint32_t pn, uint8_t *key)
923 {
924 	struct aq_hw_cfg_s *cfg =
925 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
926 
927 	cfg->aq_macsec.rxsa.idx = idx;
928 	cfg->aq_macsec.rxsa.pn = pn;
929 	cfg->aq_macsec.rxsa.an = an;
930 
931 	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
932 	return 0;
933 }
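/*
 * Illustrative usage sketch (editor's note, not part of the driver):
 * these helpers are normally reached through the rte_pmd_atlantic.h
 * wrappers. A minimal flow, assuming "dev" is a started port and
 * "tx_key"/"rx_key" are 16-byte keys supplied by the caller:
 *
 *	atl_macsec_enable(dev, 1, 1);              // encrypt + replay protect
 *	atl_macsec_config_txsc(dev, src_mac);
 *	atl_macsec_config_rxsc(dev, peer_mac, 1);  // port identifier 1
 *	atl_macsec_select_txsa(dev, 0, 0, 1, tx_key);
 *	atl_macsec_select_rxsa(dev, 0, 0, 1, rx_key);
 *
 * The settings are only cached here; atl_dev_configure_macsec() pushes
 * them to the firmware from the delayed link-event handler.
 */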
934 
935 static int
936 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
937 {
938 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
939 	struct aq_hw_s *hw = &adapter->hw;
940 	struct atl_sw_stats *swstats = &adapter->sw_stats;
941 	unsigned int i;
942 
943 	hw->aq_fw_ops->update_stats(hw);
944 
945 	/* Fill out the rte_eth_stats statistics structure */
946 	stats->ipackets = hw->curr_stats.dma_pkt_rc;
947 	stats->ibytes = hw->curr_stats.dma_oct_rc;
948 	stats->imissed = hw->curr_stats.dpc;
949 	stats->ierrors = hw->curr_stats.erpt;
950 
951 	stats->opackets = hw->curr_stats.dma_pkt_tc;
952 	stats->obytes = hw->curr_stats.dma_oct_tc;
953 	stats->oerrors = 0;
954 
955 	stats->rx_nombuf = swstats->rx_nombuf;
956 
957 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
958 		stats->q_ipackets[i] = swstats->q_ipackets[i];
959 		stats->q_opackets[i] = swstats->q_opackets[i];
960 		stats->q_ibytes[i] = swstats->q_ibytes[i];
961 		stats->q_obytes[i] = swstats->q_obytes[i];
962 		stats->q_errors[i] = swstats->q_errors[i];
963 	}
964 	return 0;
965 }
966 
967 static int
968 atl_dev_stats_reset(struct rte_eth_dev *dev)
969 {
970 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
971 	struct aq_hw_s *hw = &adapter->hw;
972 
973 	hw->aq_fw_ops->update_stats(hw);
974 
975 	/* Reset software totals */
976 	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
977 
978 	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
979 
980 	return 0;
981 }
982 
983 static int
984 atl_dev_xstats_get_count(struct rte_eth_dev *dev)
985 {
986 	struct atl_adapter *adapter =
987 		(struct atl_adapter *)dev->data->dev_private;
988 
989 	struct aq_hw_s *hw = &adapter->hw;
990 	unsigned int i, count = 0;
991 
992 	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
993 		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
994 			((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
995 			continue;
996 
997 		count++;
998 	}
999 
1000 	return count;
1001 }
1002 
1003 static int
1004 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1005 			 struct rte_eth_xstat_name *xstats_names,
1006 			 unsigned int size)
1007 {
1008 	unsigned int i;
1009 	unsigned int count = atl_dev_xstats_get_count(dev);
1010 
1011 	if (xstats_names) {
1012 		for (i = 0; i < size && i < count; i++) {
1013 			snprintf(xstats_names[i].name,
1014 				RTE_ETH_XSTATS_NAME_SIZE, "%s",
1015 				atl_xstats_tbl[i].name);
1016 		}
1017 	}
1018 
1019 	return count;
1020 }
1021 
1022 static int
1023 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1024 		   unsigned int n)
1025 {
1026 	struct atl_adapter *adapter = dev->data->dev_private;
1027 	struct aq_hw_s *hw = &adapter->hw;
1028 	struct get_stats req = { 0 };
1029 	struct macsec_msg_fw_request msg = { 0 };
1030 	struct macsec_msg_fw_response resp = { 0 };
1031 	int err = -1;
1032 	unsigned int i;
1033 	unsigned int count = atl_dev_xstats_get_count(dev);
1034 
1035 	if (!stats)
1036 		return count;
1037 
1038 	if (hw->aq_fw_ops->send_macsec_req != NULL) {
1039 		req.ingress_sa_index = 0xff;
1040 		req.egress_sc_index = 0xff;
1041 		req.egress_sa_index = 0xff;
1042 
1043 		msg.msg_type = macsec_get_stats_msg;
1044 		msg.stats = req;
1045 
1046 		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1047 	}
1048 
1049 	for (i = 0; i < n && i < count; i++) {
1050 		stats[i].id = i;
1051 
1052 		switch (atl_xstats_tbl[i].type) {
1053 		case XSTATS_TYPE_MSM:
1054 			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
1055 					 atl_xstats_tbl[i].offset);
1056 			break;
1057 		case XSTATS_TYPE_MACSEC:
1058 			if (!err) {
1059 				stats[i].value =
1060 					*(u64 *)((uint8_t *)&resp.stats +
1061 					atl_xstats_tbl[i].offset);
1062 			}
1063 			break;
1064 		}
1065 	}
1066 
1067 	return i;
1068 }
1069 
1070 static int
1071 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1072 {
1073 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1074 	uint32_t fw_ver = 0;
1075 	unsigned int ret = 0;
1076 
1077 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1078 	if (ret)
1079 		return -EIO;
1080 
1081 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1082 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1083 
1084 	ret += 1; /* account for the string null-terminator */
1085 
1086 	if (fw_size < ret)
1087 		return ret;
1088 
1089 	return 0;
1090 }
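/*
 * Editor's note: per the ethdev convention, the function above returns 0
 * on success, or the buffer size (including the terminating NUL) that
 * would be required when fw_size is too small. A hypothetical caller:
 *
 *	char ver[32];
 *	if (rte_eth_dev_fw_version_get(port_id, ver, sizeof(ver)) == 0)
 *		printf("FW: %s\n", ver);
 */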
1091 
1092 static int
1093 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1094 {
1095 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1096 
1097 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1098 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1099 
1100 	dev_info->min_rx_bufsize = 1024;
1101 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1102 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1103 	dev_info->max_vfs = pci_dev->max_vfs;
1104 
1105 	dev_info->max_hash_mac_addrs = 0;
1106 	dev_info->max_vmdq_pools = 0;
1107 	dev_info->vmdq_queue_num = 0;
1108 
1109 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1110 
1111 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1112 
1113 
1114 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1115 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1116 	};
1117 
1118 	dev_info->default_txconf = (struct rte_eth_txconf) {
1119 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1120 	};
1121 
1122 	dev_info->rx_desc_lim = rx_desc_lim;
1123 	dev_info->tx_desc_lim = tx_desc_lim;
1124 
1125 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1126 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1127 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1128 
1129 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1130 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1131 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1132 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
1133 
1134 	return 0;
1135 }
1136 
1137 static const uint32_t *
1138 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1139 {
1140 	static const uint32_t ptypes[] = {
1141 		RTE_PTYPE_L2_ETHER,
1142 		RTE_PTYPE_L2_ETHER_ARP,
1143 		RTE_PTYPE_L2_ETHER_VLAN,
1144 		RTE_PTYPE_L3_IPV4,
1145 		RTE_PTYPE_L3_IPV6,
1146 		RTE_PTYPE_L4_TCP,
1147 		RTE_PTYPE_L4_UDP,
1148 		RTE_PTYPE_L4_SCTP,
1149 		RTE_PTYPE_L4_ICMP,
1150 		RTE_PTYPE_UNKNOWN
1151 	};
1152 
1153 	if (dev->rx_pkt_burst == atl_recv_pkts)
1154 		return ptypes;
1155 
1156 	return NULL;
1157 }
1158 
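/*
 * Deferred work scheduled from atl_dev_link_update(): MACsec is
 * re-applied after a link-status change from an EAL alarm callback,
 * roughly one second later, rather than from the interrupt path itself.
 */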
1159 static void
1160 atl_dev_delayed_handler(void *param)
1161 {
1162 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1163 
1164 	atl_dev_configure_macsec(dev);
1165 }
1166 
1167 
1168 /* Return 0 if the link status changed, -1 if it did not change */
1169 static int
1170 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
1171 {
1172 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1173 	struct rte_eth_link link, old;
1174 	u32 fc = AQ_NIC_FC_OFF;
1175 	int err = 0;
1176 
1177 	link.link_status = ETH_LINK_DOWN;
1178 	link.link_speed = 0;
1179 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1180 	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1181 	memset(&old, 0, sizeof(old));
1182 
1183 	/* load old link status */
1184 	rte_eth_linkstatus_get(dev, &old);
1185 
1186 	/* read current link status */
1187 	err = hw->aq_fw_ops->update_link_status(hw);
1188 
1189 	if (err)
1190 		return 0;
1191 
1192 	if (hw->aq_link_status.mbps == 0) {
1193 		/* write default (down) link status */
1194 		rte_eth_linkstatus_set(dev, &link);
1195 		if (link.link_status == old.link_status)
1196 			return -1;
1197 		return 0;
1198 	}
1199 
1200 	link.link_status = ETH_LINK_UP;
1201 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1202 	link.link_speed = hw->aq_link_status.mbps;
1203 
1204 	rte_eth_linkstatus_set(dev, &link);
1205 
1206 	if (link.link_status == old.link_status)
1207 		return -1;
1208 
1209 	/* The driver has to update the flow control settings of the RX
1210 	 * block on any link event, so query the FW for the flow control
1211 	 * mode it negotiated.
1212 	 */
1213 	if (hw->aq_fw_ops->get_flow_control) {
1214 		hw->aq_fw_ops->get_flow_control(hw, &fc);
1215 		hw_atl_b0_set_fc(hw, fc, 0U);
1216 	}
1217 
1218 	if (rte_eal_alarm_set(1000 * 1000,
1219 			      atl_dev_delayed_handler, (void *)dev) < 0)
1220 		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");
1221 
1222 	return 0;
1223 }
1224 
1225 static int
1226 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1227 {
1228 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1229 
1230 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1231 
1232 	return 0;
1233 }
1234 
1235 static int
1236 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1237 {
1238 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1239 
1240 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1241 
1242 	return 0;
1243 }
1244 
1245 static int
1246 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1247 {
1248 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1249 
1250 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1251 
1252 	return 0;
1253 }
1254 
1255 static int
1256 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1257 {
1258 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1259 
1260 	if (dev->data->promiscuous == 1)
1261 		return 0; /* must remain in all_multicast mode */
1262 
1263 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1264 
1265 	return 0;
1266 }
1267 
1268 /**
1269  * It clears the interrupt causes and enables the interrupt.
1270  * It is called only once, during NIC initialization.
1271  *
1272  * @param dev
1273  *  Pointer to struct rte_eth_dev.
1274  * @param on
1275  *  Enable or Disable.
1276  *
1277  * @return
1278  *  - On success, zero.
1279  *  - On failure, a negative value.
1280  */
1281 
1282 static int
1283 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
1284 {
1285 	atl_dev_link_status_print(dev);
1286 	return 0;
1287 }
1288 
1289 static int
1290 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
1291 {
1292 	return 0;
1293 }
1294 
1295 
1296 static int
1297 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1298 {
1299 	struct atl_interrupt *intr =
1300 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1301 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1302 	u64 cause = 0;
1303 
1304 	hw_atl_b0_hw_irq_read(hw, &cause);
1305 
1306 	atl_disable_intr(hw);
1307 
1308 	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
1309 		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
1310 
1311 	return 0;
1312 }
1313 
1314 /**
1315  * Get and print the current link status.
1316  *
1317  * @param dev
1318  *  Pointer to struct rte_eth_dev.
1319  */
1324 static void
1325 atl_dev_link_status_print(struct rte_eth_dev *dev)
1326 {
1327 	struct rte_eth_link link;
1328 
1329 	memset(&link, 0, sizeof(link));
1330 	rte_eth_linkstatus_get(dev, &link);
1331 	if (link.link_status) {
1332 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1333 					(int)(dev->data->port_id),
1334 					(unsigned int)link.link_speed,
1335 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1336 					"full-duplex" : "half-duplex");
1337 	} else {
1338 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
1339 				(int)(dev->data->port_id));
1340 	}
1341 
1342 
1343 #ifdef DEBUG
1344 {
1345 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1346 
1347 	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1348 				pci_dev->addr.domain,
1349 				pci_dev->addr.bus,
1350 				pci_dev->addr.devid,
1351 				pci_dev->addr.function);
1352 }
1353 #endif
1354 
1355 	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1356 }
1357 
1358 /*
1359  * Execute link_update once an interrupt has occurred.
1360  *
1361  * @param dev
1362  *  Pointer to struct rte_eth_dev.
1363  *
1364  * @return
1365  *  - On success, zero.
1366  *  - On failure, a negative value.
1367  */
1368 static int
1369 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1370 			   struct rte_intr_handle *intr_handle)
1371 {
1372 	struct atl_interrupt *intr =
1373 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1374 	struct atl_adapter *adapter = dev->data->dev_private;
1375 	struct aq_hw_s *hw = &adapter->hw;
1376 
1377 	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
1378 		goto done;
1379 
1380 	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1381 
1382 	/* Notify the user application if the link status changed */
1383 	if (!atl_dev_link_update(dev, 0)) {
1384 		atl_dev_link_status_print(dev);
1385 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1386 	} else {
1387 		if (hw->aq_fw_ops->send_macsec_req == NULL)
1388 			goto done;
1389 
1390 		/* Check whether the MACsec keys have expired */
1391 		struct get_stats req = { 0 };
1392 		struct macsec_msg_fw_request msg = { 0 };
1393 		struct macsec_msg_fw_response resp = { 0 };
1394 
1395 		req.ingress_sa_index = 0x0;
1396 		req.egress_sc_index = 0x0;
1397 		req.egress_sa_index = 0x0;
1398 		msg.msg_type = macsec_get_stats_msg;
1399 		msg.stats = req;
1400 
1401 		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1402 		if (err) {
1403 			PMD_DRV_LOG(ERR, "send_macsec_req fail");
1404 			goto done;
1405 		}
1406 		if (resp.stats.egress_threshold_expired ||
1407 		    resp.stats.ingress_threshold_expired ||
1408 		    resp.stats.egress_expired ||
1409 		    resp.stats.ingress_expired) {
1410 			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
1411 			rte_eth_dev_callback_process(dev,
1412 				RTE_ETH_EVENT_MACSEC, NULL);
1413 		}
1414 	}
1415 done:
1416 	atl_enable_intr(dev);
1417 	rte_intr_ack(intr_handle);
1418 
1419 	return 0;
1420 }
1421 
1422 /**
1423  * Interrupt handler triggered by the NIC for handling
1424  * a specific interrupt.
1425  *
1426  * @param handle
1427  *  Pointer to interrupt handle.
1428  * @param param
1429  *  The address of the parameter (struct rte_eth_dev *) registered before.
1430  *
1431  * @return
1432  *  void
1433  */
1434 static void
1435 atl_dev_interrupt_handler(void *param)
1436 {
1437 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1438 
1439 	atl_dev_interrupt_get_status(dev);
1440 	atl_dev_interrupt_action(dev, dev->intr_handle);
1441 }
1442 
1443 
1444 static int
1445 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1446 {
1447 	return SFP_EEPROM_SIZE;
1448 }
1449 
1450 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1451 		       struct rte_dev_eeprom_info *eeprom)
1452 {
1453 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1454 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1455 
1456 	if (hw->aq_fw_ops->get_eeprom == NULL)
1457 		return -ENOTSUP;
1458 
1459 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1460 	    eeprom->data == NULL)
1461 		return -EINVAL;
1462 
1463 	if (eeprom->magic > 0x7F)
1464 		return -EINVAL;
1465 
1466 	if (eeprom->magic)
1467 		dev_addr = eeprom->magic;
1468 
1469 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1470 					 eeprom->length, eeprom->offset);
1471 }
1472 
1473 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1474 		       struct rte_dev_eeprom_info *eeprom)
1475 {
1476 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1477 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1478 
1479 	if (hw->aq_fw_ops->set_eeprom == NULL)
1480 		return -ENOTSUP;
1481 
1482 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1483 	    eeprom->data == NULL)
1484 		return -EINVAL;
1485 
1486 	if (eeprom->magic > 0x7F)
1487 		return -EINVAL;
1488 
1489 	if (eeprom->magic)
1490 		dev_addr = eeprom->magic;
1491 
1492 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1493 					 eeprom->length, eeprom->offset);
1494 }
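/*
 * Editor's note: both accessors above reuse the "magic" field of
 * struct rte_dev_eeprom_info to carry a 7-bit SMBus device address,
 * with 0 selecting the default SMBUS_DEVICE_ID. A hypothetical read:
 *
 *	uint8_t buf[SFP_EEPROM_SIZE];
 *	struct rte_dev_eeprom_info info = {
 *		.data = buf,
 *		.offset = 0,
 *		.length = sizeof(buf),
 *		.magic = 0x51,	// e.g. the SFP diagnostics page
 *	};
 *	rte_eth_dev_get_eeprom(port_id, &info);
 */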
1495 
1496 static int
1497 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1498 {
1499 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1500 	u32 mif_id;
1501 	int err;
1502 
1503 	if (regs->data == NULL) {
1504 		regs->length = hw_atl_utils_hw_get_reg_length();
1505 		regs->width = sizeof(u32);
1506 		return 0;
1507 	}
1508 
1509 	/* Only full register dump is supported */
1510 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1511 		return -ENOTSUP;
1512 
1513 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1514 
1515 	/* Device version */
1516 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1517 	regs->version = mif_id & 0xFFU;
1518 
1519 	return err;
1520 }
1521 
1522 static int
1523 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1524 {
1525 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1526 	u32 fc = AQ_NIC_FC_OFF;
1527 
1528 	if (hw->aq_fw_ops->get_flow_control == NULL)
1529 		return -ENOTSUP;
1530 
1531 	hw->aq_fw_ops->get_flow_control(hw, &fc);
1532 
1533 	if (fc == AQ_NIC_FC_OFF)
1534 		fc_conf->mode = RTE_FC_NONE;
1535 	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
1536 		fc_conf->mode = RTE_FC_FULL;
1537 	else if (fc & AQ_NIC_FC_RX)
1538 		fc_conf->mode = RTE_FC_RX_PAUSE;
1539 	else if (fc & AQ_NIC_FC_TX)
1540 		fc_conf->mode = RTE_FC_TX_PAUSE;
1541 
1542 	return 0;
1543 }
1544 
1545 static int
1546 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1547 {
1548 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1549 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1550 
1551 
1552 	if (hw->aq_fw_ops->set_flow_control == NULL)
1553 		return -ENOTSUP;
1554 
1555 	if (fc_conf->mode == RTE_FC_NONE)
1556 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1557 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1558 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1559 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1560 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1561 	else if (fc_conf->mode == RTE_FC_FULL)
1562 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1563 
1564 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1565 		return hw->aq_fw_ops->set_flow_control(hw);
1566 
1567 	return 0;
1568 }
1569 
1570 static int
1571 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1572 		    u8 *mac_addr, bool enable)
1573 {
1574 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1575 	unsigned int h = 0U;
1576 	unsigned int l = 0U;
1577 	int err;
1578 
1579 	if (mac_addr) {
1580 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1581 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1582 			(mac_addr[4] << 8) | mac_addr[5];
1583 	}
1584 
1585 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1586 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1587 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1588 
1589 	if (enable)
1590 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1591 
1592 	err = aq_hw_err_from_flags(hw);
1593 
1594 	return err;
1595 }
1596 
1597 static int
1598 atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1599 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1600 {
1601 	if (rte_is_zero_ether_addr(mac_addr)) {
1602 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1603 		return -EINVAL;
1604 	}
1605 
1606 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1607 }
1608 
1609 static void
1610 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1611 {
1612 	atl_update_mac_addr(dev, index, NULL, false);
1613 }
1614 
1615 static int
1616 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
1617 {
1618 	atl_remove_mac_addr(dev, 0);
1619 	atl_add_mac_addr(dev, addr, 0, 0);
1620 	return 0;
1621 }
1622 
1623 static int
1624 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1625 {
1626 	struct rte_eth_dev_info dev_info;
1627 	int ret;
1628 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1629 
1630 	ret = atl_dev_info_get(dev, &dev_info);
1631 	if (ret != 0)
1632 		return ret;
1633 
1634 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1635 		return -EINVAL;
1636 
1637 	/* update max frame size */
1638 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1639 
1640 	return 0;
1641 }
1642 
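/*
 * The device provides HW_ATL_B0_MAX_VLAN_IDS VLAN filter slots. Adding
 * a VLAN_ID claims a free slot and removing one releases it; whenever
 * the table ends up empty, VLAN promiscuous mode is enabled so traffic
 * is not silently dropped.
 */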
1643 static int
1644 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1645 {
1646 	struct aq_hw_cfg_s *cfg =
1647 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1648 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1649 	int err = 0;
1650 	int i = 0;
1651 
1652 	PMD_INIT_FUNC_TRACE();
1653 
1654 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1655 		if (cfg->vlan_filter[i] == vlan_id) {
1656 			if (!on) {
1657 				/* Disable VLAN filter. */
1658 				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1659 
1660 				/* Clear VLAN filter entry */
1661 				cfg->vlan_filter[i] = 0;
1662 			}
1663 			break;
1664 		}
1665 	}
1666 
1667 	/* VLAN_ID was not found, so there is nothing to delete. */
1668 	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1669 		goto exit;
1670 
1671 	/* VLAN_ID already exists, or was already removed above. Nothing to do. */
1672 	if (i != HW_ATL_B0_MAX_VLAN_IDS)
1673 		goto exit;
1674 
1675 	/* Try to find a free VLAN filter entry for the new VLAN_ID */
1676 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1677 		if (cfg->vlan_filter[i] == 0)
1678 			break;
1679 	}
1680 
1681 	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1682 		/* No free VLAN filter entry for the new VLAN_ID */
1683 		err = -ENOMEM;
1684 		goto exit;
1685 	}
1686 
1687 	cfg->vlan_filter[i] = vlan_id;
1688 	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1689 	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1690 	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1691 
1692 exit:
1693 	/* Enable VLAN promiscuous mode if the VLAN filter table is empty */
1694 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1695 		if (cfg->vlan_filter[i] != 0)
1696 			break;
1697 	}
1698 
1699 	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1700 
1701 	return err;
1702 }
1703 
1704 static int
1705 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1706 {
1707 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1708 	struct aq_hw_cfg_s *cfg =
1709 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1710 	int i;
1711 
1712 	PMD_INIT_FUNC_TRACE();
1713 
1714 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1715 		if (cfg->vlan_filter[i])
1716 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1717 	}
1718 	return 0;
1719 }
1720 
1721 static int
1722 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1723 {
1724 	struct aq_hw_cfg_s *cfg =
1725 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1726 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1727 	int ret = 0;
1728 	int i;
1729 
1730 	PMD_INIT_FUNC_TRACE();
1731 
1732 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1733 
1734 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1735 
1736 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1737 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1738 
1739 	if (mask & ETH_VLAN_EXTEND_MASK)
1740 		ret = -ENOTSUP;
1741 
1742 	return ret;
1743 }
1744 
1745 static int
1746 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1747 		  uint16_t tpid)
1748 {
1749 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1750 	int err = 0;
1751 
1752 	PMD_INIT_FUNC_TRACE();
1753 
1754 	switch (vlan_type) {
1755 	case ETH_VLAN_TYPE_INNER:
1756 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1757 		break;
1758 	case ETH_VLAN_TYPE_OUTER:
1759 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1760 		break;
1761 	default:
1762 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1763 		err = -ENOTSUP;
1764 	}
1765 
1766 	return err;
1767 }
1768 
1769 static void
1770 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1771 {
1772 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1773 
1774 	PMD_INIT_FUNC_TRACE();
1775 
1776 	if (queue_id >= dev->data->nb_rx_queues) {
1777 		PMD_DRV_LOG(ERR, "Invalid queue id");
1778 		return;
1779 	}
1780 
1781 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1782 }
1783 
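/*
 * Multicast addresses are programmed into the unicast filter table,
 * starting at slot HW_ATL_B0_MAC_MIN; slots beyond nb_mc_addr are
 * explicitly disabled.
 */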
1784 static int
1785 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1786 			  struct rte_ether_addr *mc_addr_set,
1787 			  uint32_t nb_mc_addr)
1788 {
1789 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1790 	u32 i;
1791 
1792 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1793 		return -EINVAL;
1794 
1795 	/* Update the whole UC filter table */
1796 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1797 		u8 *mac_addr = NULL;
1798 		u32 l = 0, h = 0;
1799 
1800 		if (i < nb_mc_addr) {
1801 			mac_addr = mc_addr_set[i].addr_bytes;
1802 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1803 				(mac_addr[4] << 8) | mac_addr[5];
1804 			h = (mac_addr[0] << 8) | mac_addr[1];
1805 		}
1806 
1807 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1808 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1809 							HW_ATL_B0_MAC_MIN + i);
1810 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1811 							HW_ATL_B0_MAC_MIN + i);
1812 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1813 					   HW_ATL_B0_MAC_MIN + i);
1814 	}
1815 
1816 	return 0;
1817 }
1818 
1819 static int
1820 atl_reta_update(struct rte_eth_dev *dev,
1821 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1822 		   uint16_t reta_size)
1823 {
1824 	int i;
1825 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1826 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1827 
1828 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1829 		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1830 					dev->data->nb_rx_queues - 1);
1831 
1832 	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1833 	return 0;
1834 }
1835 
1836 static int
1837 atl_reta_query(struct rte_eth_dev *dev,
1838 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1839 		    uint16_t reta_size)
1840 {
1841 	int i;
1842 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1843 
1844 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1845 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1846 	reta_conf->mask = ~0U;
1847 	return 0;
1848 }
1849 
1850 static int
1851 atl_rss_hash_update(struct rte_eth_dev *dev,
1852 				 struct rte_eth_rss_conf *rss_conf)
1853 {
1854 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1855 	struct aq_hw_cfg_s *cfg =
1856 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1857 	static u8 def_rss_key[40] = {
1858 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1859 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1860 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1861 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1862 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1863 	};
1864 
1865 	cfg->is_rss = !!rss_conf->rss_hf;
1866 	if (rss_conf->rss_key) {
1867 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1868 		       rss_conf->rss_key_len);
1869 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1870 	} else {
1871 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1872 		       sizeof(def_rss_key));
1873 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1874 	}
1875 
1876 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1877 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1878 	return 0;
1879 }
1880 
1881 static int
1882 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1883 				 struct rte_eth_rss_conf *rss_conf)
1884 {
1885 	struct aq_hw_cfg_s *cfg =
1886 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1887 
1888 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1889 	if (rss_conf->rss_key) {
1890 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1891 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1892 		       rss_conf->rss_key_len);
1893 	}
1894 
1895 	return 0;
1896 }
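/*
 * Illustrative usage sketch (editor's note): enabling RSS with the
 * driver's built-in default key, assuming "port_id" is a configured
 * atlantic port:
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,	// NULL falls back to def_rss_key
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
 *	};
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */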
1897 
1898 static bool
1899 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1900 {
1901 	if (strcmp(dev->device->driver->name, drv->driver.name))
1902 		return false;
1903 
1904 	return true;
1905 }
1906 
1907 bool
1908 is_atlantic_supported(struct rte_eth_dev *dev)
1909 {
1910 	return is_device_supported(dev, &rte_atl_pmd);
1911 }
1912 
1913 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1914 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1915 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1916 RTE_LOG_REGISTER(atl_logtype_init, pmd.net.atlantic.init, NOTICE);
1917 RTE_LOG_REGISTER(atl_logtype_driver, pmd.net.atlantic.driver, NOTICE);
1918