1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_alarm.h>
8 
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
12 #include "atl_logs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
16 
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int  atl_dev_configure(struct rte_eth_dev *dev);
19 static int  atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static int  atl_dev_close(struct rte_eth_dev *dev);
24 static int  atl_dev_reset(struct rte_eth_dev *dev);
25 static int  atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static int  atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
30 
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 				    struct rte_eth_xstat_name *xstats_names,
33 				    unsigned int size);
34 
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 				struct rte_eth_stats *stats);
37 
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 			      struct rte_eth_xstat *stats, unsigned int n);
40 
41 static int atl_dev_stats_reset(struct rte_eth_dev *dev);
42 
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
44 			      size_t fw_size);
45 
46 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
47 
48 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
49 
50 /* VLAN stuff */
51 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
52 		uint16_t vlan_id, int on);
53 
54 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
55 
56 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
57 				     uint16_t queue_id, int on);
58 
59 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
60 			     enum rte_vlan_type vlan_type, uint16_t tpid);
61 
62 /* EEPROM */
63 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
64 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
65 			      struct rte_dev_eeprom_info *eeprom);
66 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
67 			      struct rte_dev_eeprom_info *eeprom);
68 
69 /* Regs */
70 static int atl_dev_get_regs(struct rte_eth_dev *dev,
71 			    struct rte_dev_reg_info *regs);
72 
73 /* Flow control */
74 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
75 			       struct rte_eth_fc_conf *fc_conf);
76 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
77 			       struct rte_eth_fc_conf *fc_conf);
78 
79 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
80 
81 /* Interrupts */
82 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
83 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
84 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
85 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
86 				    struct rte_intr_handle *handle);
87 static void atl_dev_interrupt_handler(void *param);
88 
89 
90 static int atl_add_mac_addr(struct rte_eth_dev *dev,
91 			    struct rte_ether_addr *mac_addr,
92 			    uint32_t index, uint32_t pool);
93 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
94 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
95 					   struct rte_ether_addr *mac_addr);
96 
97 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
98 				    struct rte_ether_addr *mc_addr_set,
99 				    uint32_t nb_mc_addr);
100 
101 /* RSS */
102 static int atl_reta_update(struct rte_eth_dev *dev,
103 			     struct rte_eth_rss_reta_entry64 *reta_conf,
104 			     uint16_t reta_size);
105 static int atl_reta_query(struct rte_eth_dev *dev,
106 			    struct rte_eth_rss_reta_entry64 *reta_conf,
107 			    uint16_t reta_size);
108 static int atl_rss_hash_update(struct rte_eth_dev *dev,
109 				 struct rte_eth_rss_conf *rss_conf);
110 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
111 				   struct rte_eth_rss_conf *rss_conf);
112 
113 
114 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
115 	struct rte_pci_device *pci_dev);
116 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
117 
118 static int atl_dev_info_get(struct rte_eth_dev *dev,
119 				struct rte_eth_dev_info *dev_info);
120 
121 /*
122  * The set of PCI devices this driver supports
123  */
124 static const struct rte_pci_id pci_id_atl_map[] = {
125 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
126 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
127 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
128 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
129 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
130 
131 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
132 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
133 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
134 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
135 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
136 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
137 
138 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
139 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
140 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
141 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
142 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
143 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
144 
145 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
146 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
147 	{ .vendor_id = 0, /* sentinel */ },
148 };
149 
150 static struct rte_pci_driver rte_atl_pmd = {
151 	.id_table = pci_id_atl_map,
152 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
153 	.probe = eth_atl_pci_probe,
154 	.remove = eth_atl_pci_remove,
155 };
156 
157 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
158 			| DEV_RX_OFFLOAD_IPV4_CKSUM \
159 			| DEV_RX_OFFLOAD_UDP_CKSUM \
160 			| DEV_RX_OFFLOAD_TCP_CKSUM \
161 			| DEV_RX_OFFLOAD_JUMBO_FRAME \
162 			| DEV_RX_OFFLOAD_MACSEC_STRIP \
163 			| DEV_RX_OFFLOAD_VLAN_FILTER)
164 
165 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
166 			| DEV_TX_OFFLOAD_IPV4_CKSUM \
167 			| DEV_TX_OFFLOAD_UDP_CKSUM \
168 			| DEV_TX_OFFLOAD_TCP_CKSUM \
169 			| DEV_TX_OFFLOAD_TCP_TSO \
170 			| DEV_TX_OFFLOAD_MACSEC_INSERT \
171 			| DEV_TX_OFFLOAD_MULTI_SEGS)
172 
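/* Size in bytes of one SFP module EEPROM page, as reported by get_eeprom_length */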
173 #define SFP_EEPROM_SIZE 0x100
174 
175 static const struct rte_eth_desc_lim rx_desc_lim = {
176 	.nb_max = ATL_MAX_RING_DESC,
177 	.nb_min = ATL_MIN_RING_DESC,
178 	.nb_align = ATL_RXD_ALIGN,
179 };
180 
181 static const struct rte_eth_desc_lim tx_desc_lim = {
182 	.nb_max = ATL_MAX_RING_DESC,
183 	.nb_min = ATL_MIN_RING_DESC,
184 	.nb_align = ATL_TXD_ALIGN,
185 	.nb_seg_max = ATL_TX_MAX_SEG,
186 	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
187 };
188 
189 enum atl_xstats_type {
190 	XSTATS_TYPE_MSM = 0,
191 	XSTATS_TYPE_MACSEC,
192 };
193 
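/*
 * Helpers for building xstats table entries: the stringified field name,
 * the field's byte offset within the backing stats structure, and the
 * counter type used to select that structure at query time.
 */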
194 #define ATL_XSTATS_FIELD(name) { \
195 	#name, \
196 	offsetof(struct aq_stats_s, name), \
197 	XSTATS_TYPE_MSM \
198 }
199 
200 #define ATL_MACSEC_XSTATS_FIELD(name) { \
201 	#name, \
202 	offsetof(struct macsec_stats, name), \
203 	XSTATS_TYPE_MACSEC \
204 }
205 
206 struct atl_xstats_tbl_s {
207 	const char *name;
208 	unsigned int offset;
209 	enum atl_xstats_type type;
210 };
211 
212 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
213 	ATL_XSTATS_FIELD(uprc),
214 	ATL_XSTATS_FIELD(mprc),
215 	ATL_XSTATS_FIELD(bprc),
216 	ATL_XSTATS_FIELD(erpt),
217 	ATL_XSTATS_FIELD(uptc),
218 	ATL_XSTATS_FIELD(mptc),
219 	ATL_XSTATS_FIELD(bptc),
220 	ATL_XSTATS_FIELD(erpr),
221 	ATL_XSTATS_FIELD(ubrc),
222 	ATL_XSTATS_FIELD(ubtc),
223 	ATL_XSTATS_FIELD(mbrc),
224 	ATL_XSTATS_FIELD(mbtc),
225 	ATL_XSTATS_FIELD(bbrc),
226 	ATL_XSTATS_FIELD(bbtc),
227 	/* Ingress Common Counters */
228 	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
229 	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
230 	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
231 	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
232 	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
233 	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
234 	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
235 	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
236 	/* Ingress SA Counters */
237 	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
238 	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
239 	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
240 	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
241 	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
242 	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
243 	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
244 	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
245 	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
246 	/* Egress Common Counters */
247 	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
248 	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
249 	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
250 	ATL_MACSEC_XSTATS_FIELD(out_too_long),
251 	/* Egress SC Counters */
252 	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
253 	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
254 	/* Egress SA Counters */
255 	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
256 	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
257 	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
258 	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
259 };
260 
261 static const struct eth_dev_ops atl_eth_dev_ops = {
262 	.dev_configure	      = atl_dev_configure,
263 	.dev_start	      = atl_dev_start,
264 	.dev_stop	      = atl_dev_stop,
265 	.dev_set_link_up      = atl_dev_set_link_up,
266 	.dev_set_link_down    = atl_dev_set_link_down,
267 	.dev_close	      = atl_dev_close,
268 	.dev_reset	      = atl_dev_reset,
269 
270 	/* PROMISC */
271 	.promiscuous_enable   = atl_dev_promiscuous_enable,
272 	.promiscuous_disable  = atl_dev_promiscuous_disable,
273 	.allmulticast_enable  = atl_dev_allmulticast_enable,
274 	.allmulticast_disable = atl_dev_allmulticast_disable,
275 
276 	/* Link */
277 	.link_update	      = atl_dev_link_update,
278 
279 	.get_reg              = atl_dev_get_regs,
280 
281 	/* Stats */
282 	.stats_get	      = atl_dev_stats_get,
283 	.xstats_get	      = atl_dev_xstats_get,
284 	.xstats_get_names     = atl_dev_xstats_get_names,
285 	.stats_reset	      = atl_dev_stats_reset,
286 	.xstats_reset	      = atl_dev_stats_reset,
287 
288 	.fw_version_get       = atl_fw_version_get,
289 	.dev_infos_get	      = atl_dev_info_get,
290 	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
291 
292 	.mtu_set              = atl_dev_mtu_set,
293 
294 	/* VLAN */
295 	.vlan_filter_set      = atl_vlan_filter_set,
296 	.vlan_offload_set     = atl_vlan_offload_set,
297 	.vlan_tpid_set        = atl_vlan_tpid_set,
298 	.vlan_strip_queue_set = atl_vlan_strip_queue_set,
299 
300 	/* Queue Control */
301 	.rx_queue_start	      = atl_rx_queue_start,
302 	.rx_queue_stop	      = atl_rx_queue_stop,
303 	.rx_queue_setup       = atl_rx_queue_setup,
304 	.rx_queue_release     = atl_rx_queue_release,
305 
306 	.tx_queue_start	      = atl_tx_queue_start,
307 	.tx_queue_stop	      = atl_tx_queue_stop,
308 	.tx_queue_setup       = atl_tx_queue_setup,
309 	.tx_queue_release     = atl_tx_queue_release,
310 
311 	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
312 	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
313 
314 	/* EEPROM */
315 	.get_eeprom_length    = atl_dev_get_eeprom_length,
316 	.get_eeprom           = atl_dev_get_eeprom,
317 	.set_eeprom           = atl_dev_set_eeprom,
318 
319 	/* Flow Control */
320 	.flow_ctrl_get	      = atl_flow_ctrl_get,
321 	.flow_ctrl_set	      = atl_flow_ctrl_set,
322 
323 	/* MAC */
324 	.mac_addr_add	      = atl_add_mac_addr,
325 	.mac_addr_remove      = atl_remove_mac_addr,
326 	.mac_addr_set	      = atl_set_default_mac_addr,
327 	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
328 	.rxq_info_get	      = atl_rxq_info_get,
329 	.txq_info_get	      = atl_txq_info_get,
330 
331 	.reta_update          = atl_reta_update,
332 	.reta_query           = atl_reta_query,
333 	.rss_hash_update      = atl_rss_hash_update,
334 	.rss_hash_conf_get    = atl_rss_hash_conf_get,
335 };
336 
337 static inline int32_t
338 atl_reset_hw(struct aq_hw_s *hw)
339 {
340 	return hw_atl_b0_hw_reset(hw);
341 }
342 
343 static inline void
344 atl_enable_intr(struct rte_eth_dev *dev)
345 {
346 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
347 
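	/* Unmask all interrupt causes (lower status word of the mask) */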
348 	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
349 }
350 
351 static void
352 atl_disable_intr(struct aq_hw_s *hw)
353 {
354 	PMD_INIT_FUNC_TRACE();
355 	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
356 }
357 
358 static int
359 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
360 {
361 	struct atl_adapter *adapter = eth_dev->data->dev_private;
362 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
363 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
364 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
365 	int err = 0;
366 
367 	PMD_INIT_FUNC_TRACE();
368 
369 	eth_dev->dev_ops = &atl_eth_dev_ops;
370 
371 	eth_dev->rx_queue_count       = atl_rx_queue_count;
372 	eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
373 	eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;
374 
375 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
376 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
377 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
378 
379 	/* For secondary processes, the primary process has done all the work */
380 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
381 		return 0;
382 
383 	/* Vendor and Device ID need to be set before init of shared code */
384 	hw->device_id = pci_dev->id.device_id;
385 	hw->vendor_id = pci_dev->id.vendor_id;
386 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
387 
	/* Hardware configuration - hardcoded defaults */
389 	adapter->hw_cfg.is_lro = false;
390 	adapter->hw_cfg.wol = false;
391 	adapter->hw_cfg.is_rss = false;
392 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
393 
394 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
395 			  AQ_NIC_RATE_5G |
396 			  AQ_NIC_RATE_2G5 |
397 			  AQ_NIC_RATE_1G |
398 			  AQ_NIC_RATE_100M;
399 
400 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
401 	adapter->hw_cfg.aq_rss.indirection_table_size =
402 		HW_ATL_B0_RSS_REDIRECTION_MAX;
403 
404 	hw->aq_nic_cfg = &adapter->hw_cfg;
405 
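	/* Serializes firmware mailbox accesses from the control path */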
406 	pthread_mutex_init(&hw->mbox_mutex, NULL);
407 
408 	/* disable interrupt */
409 	atl_disable_intr(hw);
410 
411 	/* Allocate memory for storing MAC addresses */
412 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
413 					RTE_ETHER_ADDR_LEN, 0);
414 	if (eth_dev->data->mac_addrs == NULL) {
415 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
416 		return -ENOMEM;
417 	}
418 
419 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
420 	if (err)
421 		return err;
422 
423 	/* Copy the permanent MAC address */
424 	if (hw->aq_fw_ops->get_mac_permanent(hw,
425 			eth_dev->data->mac_addrs->addr_bytes) != 0)
426 		return -EINVAL;
427 
428 	/* Reset the hw statistics */
429 	atl_dev_stats_reset(eth_dev);
430 
431 	rte_intr_callback_register(intr_handle,
432 				   atl_dev_interrupt_handler, eth_dev);
433 
434 	/* enable uio/vfio intr/eventfd mapping */
435 	rte_intr_enable(intr_handle);
436 
	/* enable supported interrupts */
438 	atl_enable_intr(eth_dev);
439 
440 	return err;
441 }
442 
443 static int
444 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
445 	struct rte_pci_device *pci_dev)
446 {
447 	return rte_eth_dev_pci_generic_probe(pci_dev,
448 		sizeof(struct atl_adapter), eth_atl_dev_init);
449 }
450 
451 static int
452 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
453 {
454 	return rte_eth_dev_pci_generic_remove(pci_dev, atl_dev_close);
455 }
456 
457 static int
458 atl_dev_configure(struct rte_eth_dev *dev)
459 {
460 	struct atl_interrupt *intr =
461 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
462 
463 	PMD_INIT_FUNC_TRACE();
464 
465 	/* set flag to update link status after init */
466 	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
467 
468 	return 0;
469 }
470 
471 /*
472  * Configure device link speed and setup link.
473  * It returns 0 on success.
474  */
475 static int
476 atl_dev_start(struct rte_eth_dev *dev)
477 {
478 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
479 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
480 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
481 	uint32_t intr_vector = 0;
482 	int status;
483 	int err;
484 
485 	PMD_INIT_FUNC_TRACE();
486 
487 	/* set adapter started */
488 	hw->adapter_stopped = 0;
489 
490 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
491 		PMD_INIT_LOG(ERR,
492 		"Invalid link_speeds for port %u, fix speed not supported",
493 				dev->data->port_id);
494 		return -EINVAL;
495 	}
496 
497 	/* disable uio/vfio intr/eventfd mapping */
498 	rte_intr_disable(intr_handle);
499 
	/* Reinitialize the adapter:
	 * reset the HW here, then start it again below.
	 */
503 	status = atl_reset_hw(hw);
504 	if (status != 0)
505 		return -EIO;
506 
507 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
508 
509 	hw_atl_b0_hw_start(hw);
510 	/* check and configure queue intr-vector mapping */
511 	if ((rte_intr_cap_multiple(intr_handle) ||
512 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
513 	    dev->data->dev_conf.intr_conf.rxq != 0) {
514 		intr_vector = dev->data->nb_rx_queues;
515 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
516 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
517 					ATL_MAX_INTR_QUEUE_NUM);
518 			return -ENOTSUP;
519 		}
520 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
521 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
522 			return -1;
523 		}
524 	}
525 
526 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
527 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
528 				    dev->data->nb_rx_queues * sizeof(int), 0);
529 		if (intr_handle->intr_vec == NULL) {
530 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
531 				     " intr_vec", dev->data->nb_rx_queues);
532 			return -ENOMEM;
533 		}
534 	}
535 
536 	/* initialize transmission unit */
537 	atl_tx_init(dev);
538 
539 	/* This can fail when allocating mbufs for descriptor rings */
540 	err = atl_rx_init(dev);
541 	if (err) {
542 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
543 		goto error;
544 	}
545 
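	/* fw_ver_actual packs the version as major:8 | minor:8 | build:16 */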
546 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
547 		hw->fw_ver_actual >> 24,
548 		(hw->fw_ver_actual >> 16) & 0xFF,
549 		hw->fw_ver_actual & 0xFFFF);
550 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
551 
552 	err = atl_start_queues(dev);
553 	if (err < 0) {
554 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
555 		goto error;
556 	}
557 
	err = atl_dev_set_link_up(dev);
	if (err)
		goto error;

	err = hw->aq_fw_ops->update_link_status(hw);
	if (err)
		goto error;
564 
565 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
566 
567 	if (rte_intr_allow_others(intr_handle)) {
568 		/* check if lsc interrupt is enabled */
569 		if (dev->data->dev_conf.intr_conf.lsc != 0)
570 			atl_dev_lsc_interrupt_setup(dev, true);
571 		else
572 			atl_dev_lsc_interrupt_setup(dev, false);
573 	} else {
574 		rte_intr_callback_unregister(intr_handle,
575 					     atl_dev_interrupt_handler, dev);
576 		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "LSC interrupt is not enabled:"
				     " no interrupt multiplexing support");
579 	}
580 
581 	/* check if rxq interrupt is enabled */
582 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
583 	    rte_intr_dp_is_en(intr_handle))
584 		atl_dev_rxq_interrupt_setup(dev);
585 
586 	/* enable uio/vfio intr/eventfd mapping */
587 	rte_intr_enable(intr_handle);
588 
	/* re-enable interrupts, since they were disabled by the HW reset */
590 	atl_enable_intr(dev);
591 
592 	return 0;
593 
594 error:
595 	atl_stop_queues(dev);
596 	return -EIO;
597 }
598 
599 /*
600  * Stop device: disable rx and tx functions to allow for reconfiguring.
601  */
602 static void
603 atl_dev_stop(struct rte_eth_dev *dev)
604 {
605 	struct rte_eth_link link;
606 	struct aq_hw_s *hw =
607 		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
608 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
609 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
610 
611 	PMD_INIT_FUNC_TRACE();
612 
613 	/* disable interrupts */
614 	atl_disable_intr(hw);
615 
616 	/* reset the NIC */
617 	atl_reset_hw(hw);
618 	hw->adapter_stopped = 1;
619 
620 	atl_stop_queues(dev);
621 
622 	/* Clear stored conf */
623 	dev->data->scattered_rx = 0;
624 	dev->data->lro = 0;
625 
626 	/* Clear recorded link status */
627 	memset(&link, 0, sizeof(link));
628 	rte_eth_linkstatus_set(dev, &link);
629 
630 	if (!rte_intr_allow_others(intr_handle))
		/* restore the default interrupt handler */
632 		rte_intr_callback_register(intr_handle,
633 					   atl_dev_interrupt_handler,
634 					   (void *)dev);
635 
636 	/* Clean datapath event and queue/vec mapping */
637 	rte_intr_efd_disable(intr_handle);
638 	if (intr_handle->intr_vec != NULL) {
639 		rte_free(intr_handle->intr_vec);
640 		intr_handle->intr_vec = NULL;
641 	}
642 }
643 
644 /*
 * Set device link up: apply the configured link speeds and (re)start
 * link negotiation.
646  */
647 static int
648 atl_dev_set_link_up(struct rte_eth_dev *dev)
649 {
650 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
651 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
652 	uint32_t speed_mask = 0;
653 
654 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
655 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
656 	} else {
657 		if (link_speeds & ETH_LINK_SPEED_10G)
658 			speed_mask |= AQ_NIC_RATE_10G;
659 		if (link_speeds & ETH_LINK_SPEED_5G)
660 			speed_mask |= AQ_NIC_RATE_5G;
661 		if (link_speeds & ETH_LINK_SPEED_1G)
662 			speed_mask |= AQ_NIC_RATE_1G;
663 		if (link_speeds & ETH_LINK_SPEED_2_5G)
664 			speed_mask |=  AQ_NIC_RATE_2G5;
665 		if (link_speeds & ETH_LINK_SPEED_100M)
666 			speed_mask |= AQ_NIC_RATE_100M;
667 	}
668 
669 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
670 }
671 
672 /*
 * Set device link down: stop the link by clearing the speed mask.
674  */
675 static int
676 atl_dev_set_link_down(struct rte_eth_dev *dev)
677 {
678 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
679 
680 	return hw->aq_fw_ops->set_link_speed(hw, 0);
681 }
682 
683 /*
684  * Reset and stop device.
685  */
686 static int
687 atl_dev_close(struct rte_eth_dev *dev)
688 {
689 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
690 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
691 	struct aq_hw_s *hw;
692 
693 	PMD_INIT_FUNC_TRACE();
694 
695 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
696 		return 0;
697 
698 	hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
699 
700 	atl_dev_stop(dev);
701 
702 	atl_free_queues(dev);
703 
704 	dev->dev_ops = NULL;
705 	dev->rx_pkt_burst = NULL;
706 	dev->tx_pkt_burst = NULL;
707 
708 	/* disable uio intr before callback unregister */
709 	rte_intr_disable(intr_handle);
710 	rte_intr_callback_unregister(intr_handle,
711 				     atl_dev_interrupt_handler, dev);
712 
713 	pthread_mutex_destroy(&hw->mbox_mutex);
714 
715 	return 0;
716 }
717 
718 static int
719 atl_dev_reset(struct rte_eth_dev *dev)
720 {
721 	int ret;
722 
723 	ret = atl_dev_close(dev);
724 	if (ret)
725 		return ret;
726 
727 	ret = eth_atl_dev_init(dev);
728 
729 	return ret;
730 }
731 
732 static int
733 atl_dev_configure_macsec(struct rte_eth_dev *dev)
734 {
735 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
736 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
737 	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
738 	struct macsec_msg_fw_request msg_macsec;
739 	struct macsec_msg_fw_response response;
740 
741 	if (!aqcfg->common.macsec_enabled ||
742 	    hw->aq_fw_ops->send_macsec_req == NULL)
743 		return 0;
744 
745 	memset(&msg_macsec, 0, sizeof(msg_macsec));
746 
	/* Create the SC/SA structures from the parameters provided by DPDK */
748 
749 	/* Configure macsec */
750 	msg_macsec.msg_type = macsec_cfg_msg;
751 	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
752 	msg_macsec.cfg.interrupts_enabled = 1;
753 
754 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
755 
756 	if (response.result)
757 		return -1;
758 
759 	memset(&msg_macsec, 0, sizeof(msg_macsec));
760 
761 	/* Configure TX SC */
762 
763 	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* only a single TX SC is supported */
765 	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
766 
767 	/* MAC addr for TX */
768 	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
769 	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
770 	msg_macsec.txsc.sa_mask = 0x3f;
771 
772 	msg_macsec.txsc.da_mask = 0;
773 	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */
775 
776 	/*
777 	 * Creating SCI (Secure Channel Identifier).
778 	 * SCI constructed from Source MAC and Port identifier
779 	 */
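	/* (Per IEEE 802.1AE, the 64-bit SCI is the 48-bit source MAC
	 * address followed by the 16-bit port identifier.)
	 */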
780 	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
781 			       (msg_macsec.txsc.mac_sa[0] >> 16);
782 	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
783 
784 	uint32_t port_identifier = 1;
785 
786 	msg_macsec.txsc.sci[1] = sci_hi_part;
787 	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
788 
789 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
790 
791 	if (response.result)
792 		return -1;
793 
794 	memset(&msg_macsec, 0, sizeof(msg_macsec));
795 
796 	/* Configure RX SC */
797 
798 	msg_macsec.msg_type = macsec_add_rx_sc_msg;
799 	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
800 	msg_macsec.rxsc.replay_protect =
801 		aqcfg->common.replay_protection_enabled;
802 	msg_macsec.rxsc.anti_replay_window = 0;
803 
804 	/* MAC addr for RX */
805 	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
806 	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0;
808 
809 	msg_macsec.rxsc.sa_mask = 0;
810 
811 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
812 
813 	if (response.result)
814 		return -1;
815 
816 	memset(&msg_macsec, 0, sizeof(msg_macsec));
817 
	/* Configure TX SA */
819 
820 	msg_macsec.msg_type = macsec_add_tx_sa_msg;
821 	msg_macsec.txsa.index = aqcfg->txsa.idx;
822 	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
823 
824 	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
825 	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
826 	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
827 	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
828 
829 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
830 
831 	if (response.result)
832 		return -1;
833 
834 	memset(&msg_macsec, 0, sizeof(msg_macsec));
835 
836 	/* Configure RX SA */
837 
838 	msg_macsec.msg_type = macsec_add_rx_sa_msg;
839 	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
840 	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
841 
842 	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
843 	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
844 	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
845 	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
846 
847 	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
848 
849 	if (response.result)
850 		return -1;
851 
852 	return 0;
853 }
854 
855 int atl_macsec_enable(struct rte_eth_dev *dev,
856 		      uint8_t encr, uint8_t repl_prot)
857 {
858 	struct aq_hw_cfg_s *cfg =
859 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
860 
861 	cfg->aq_macsec.common.macsec_enabled = 1;
862 	cfg->aq_macsec.common.encryption_enabled = encr;
863 	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
864 
865 	return 0;
866 }
867 
868 int atl_macsec_disable(struct rte_eth_dev *dev)
869 {
870 	struct aq_hw_cfg_s *cfg =
871 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
872 
873 	cfg->aq_macsec.common.macsec_enabled = 0;
874 
875 	return 0;
876 }
877 
878 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
879 {
880 	struct aq_hw_cfg_s *cfg =
881 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
882 
883 	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
884 	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
885 		RTE_ETHER_ADDR_LEN);
886 
887 	return 0;
888 }
889 
890 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
891 			   uint8_t *mac, uint16_t pi)
892 {
893 	struct aq_hw_cfg_s *cfg =
894 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
895 
896 	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
897 	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
898 		RTE_ETHER_ADDR_LEN);
899 	cfg->aq_macsec.rxsc.pi = pi;
900 
901 	return 0;
902 }
903 
904 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
905 			   uint8_t idx, uint8_t an,
906 			   uint32_t pn, uint8_t *key)
907 {
908 	struct aq_hw_cfg_s *cfg =
909 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
910 
911 	cfg->aq_macsec.txsa.idx = idx;
912 	cfg->aq_macsec.txsa.pn = pn;
913 	cfg->aq_macsec.txsa.an = an;
914 
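	/* 16-byte key, i.e. MACsec GCM-AES-128 */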
915 	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
916 	return 0;
917 }
918 
919 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
920 			   uint8_t idx, uint8_t an,
921 			   uint32_t pn, uint8_t *key)
922 {
923 	struct aq_hw_cfg_s *cfg =
924 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
925 
926 	cfg->aq_macsec.rxsa.idx = idx;
927 	cfg->aq_macsec.rxsa.pn = pn;
928 	cfg->aq_macsec.rxsa.an = an;
929 
930 	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
931 	return 0;
932 }
933 
934 static int
935 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
936 {
937 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
938 	struct aq_hw_s *hw = &adapter->hw;
939 	struct atl_sw_stats *swstats = &adapter->sw_stats;
940 	unsigned int i;
941 
942 	hw->aq_fw_ops->update_stats(hw);
943 
944 	/* Fill out the rte_eth_stats statistics structure */
945 	stats->ipackets = hw->curr_stats.dma_pkt_rc;
946 	stats->ibytes = hw->curr_stats.dma_oct_rc;
947 	stats->imissed = hw->curr_stats.dpc;
948 	stats->ierrors = hw->curr_stats.erpt;
949 
950 	stats->opackets = hw->curr_stats.dma_pkt_tc;
951 	stats->obytes = hw->curr_stats.dma_oct_tc;
952 	stats->oerrors = 0;
953 
954 	stats->rx_nombuf = swstats->rx_nombuf;
955 
956 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
957 		stats->q_ipackets[i] = swstats->q_ipackets[i];
958 		stats->q_opackets[i] = swstats->q_opackets[i];
959 		stats->q_ibytes[i] = swstats->q_ibytes[i];
960 		stats->q_obytes[i] = swstats->q_obytes[i];
961 		stats->q_errors[i] = swstats->q_errors[i];
962 	}
963 	return 0;
964 }
965 
966 static int
967 atl_dev_stats_reset(struct rte_eth_dev *dev)
968 {
969 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
970 	struct aq_hw_s *hw = &adapter->hw;
971 
972 	hw->aq_fw_ops->update_stats(hw);
973 
974 	/* Reset software totals */
975 	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
976 
977 	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
978 
979 	return 0;
980 }
981 
982 static int
983 atl_dev_xstats_get_count(struct rte_eth_dev *dev)
984 {
985 	struct atl_adapter *adapter =
986 		(struct atl_adapter *)dev->data->dev_private;
987 
988 	struct aq_hw_s *hw = &adapter->hw;
989 	unsigned int i, count = 0;
990 
991 	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
992 		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
993 			((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
994 			continue;
995 
996 		count++;
997 	}
998 
999 	return count;
1000 }
1001 
1002 static int
1003 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1004 			 struct rte_eth_xstat_name *xstats_names,
1005 			 unsigned int size)
1006 {
1007 	unsigned int i;
1008 	unsigned int count = atl_dev_xstats_get_count(dev);
1009 
1010 	if (xstats_names) {
1011 		for (i = 0; i < size && i < count; i++) {
1012 			snprintf(xstats_names[i].name,
1013 				RTE_ETH_XSTATS_NAME_SIZE, "%s",
1014 				atl_xstats_tbl[i].name);
1015 		}
1016 	}
1017 
1018 	return count;
1019 }
1020 
1021 static int
1022 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1023 		   unsigned int n)
1024 {
1025 	struct atl_adapter *adapter = dev->data->dev_private;
1026 	struct aq_hw_s *hw = &adapter->hw;
1027 	struct get_stats req = { 0 };
1028 	struct macsec_msg_fw_request msg = { 0 };
1029 	struct macsec_msg_fw_response resp = { 0 };
1030 	int err = -1;
1031 	unsigned int i;
1032 	unsigned int count = atl_dev_xstats_get_count(dev);
1033 
1034 	if (!stats)
1035 		return count;
1036 
1037 	if (hw->aq_fw_ops->send_macsec_req != NULL) {
1038 		req.ingress_sa_index = 0xff;
1039 		req.egress_sc_index = 0xff;
1040 		req.egress_sa_index = 0xff;
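		/* 0xff is assumed here to request aggregate counters
		 * rather than the counters of a single SA/SC index.
		 */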
1041 
1042 		msg.msg_type = macsec_get_stats_msg;
1043 		msg.stats = req;
1044 
1045 		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1046 	}
1047 
1048 	for (i = 0; i < n && i < count; i++) {
1049 		stats[i].id = i;
1050 
1051 		switch (atl_xstats_tbl[i].type) {
1052 		case XSTATS_TYPE_MSM:
1053 			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
1054 					 atl_xstats_tbl[i].offset);
1055 			break;
1056 		case XSTATS_TYPE_MACSEC:
1057 			if (!err) {
1058 				stats[i].value =
1059 					*(u64 *)((uint8_t *)&resp.stats +
1060 					atl_xstats_tbl[i].offset);
1061 			}
1062 			break;
1063 		}
1064 	}
1065 
1066 	return i;
1067 }
1068 
1069 static int
1070 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1071 {
1072 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1073 	uint32_t fw_ver = 0;
1074 	unsigned int ret = 0;
1075 
1076 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1077 	if (ret)
1078 		return -EIO;
1079 
1080 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1081 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1082 
	ret += 1; /* account for the trailing null-terminator */
1084 
1085 	if (fw_size < ret)
1086 		return ret;
1087 
1088 	return 0;
1089 }
1090 
1091 static int
1092 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1093 {
1094 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1095 
1096 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1097 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1098 
1099 	dev_info->min_rx_bufsize = 1024;
1100 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1101 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1102 	dev_info->max_vfs = pci_dev->max_vfs;
1103 
1104 	dev_info->max_hash_mac_addrs = 0;
1105 	dev_info->max_vmdq_pools = 0;
1106 	dev_info->vmdq_queue_num = 0;
1107 
1108 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1109 
1110 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1111 
1112 
1113 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1114 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1115 	};
1116 
1117 	dev_info->default_txconf = (struct rte_eth_txconf) {
1118 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1119 	};
1120 
1121 	dev_info->rx_desc_lim = rx_desc_lim;
1122 	dev_info->tx_desc_lim = tx_desc_lim;
1123 
1124 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1125 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1126 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1127 
1128 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1129 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1130 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1131 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
1132 
1133 	return 0;
1134 }
1135 
1136 static const uint32_t *
1137 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1138 {
1139 	static const uint32_t ptypes[] = {
1140 		RTE_PTYPE_L2_ETHER,
1141 		RTE_PTYPE_L2_ETHER_ARP,
1142 		RTE_PTYPE_L2_ETHER_VLAN,
1143 		RTE_PTYPE_L3_IPV4,
1144 		RTE_PTYPE_L3_IPV6,
1145 		RTE_PTYPE_L4_TCP,
1146 		RTE_PTYPE_L4_UDP,
1147 		RTE_PTYPE_L4_SCTP,
1148 		RTE_PTYPE_L4_ICMP,
1149 		RTE_PTYPE_UNKNOWN
1150 	};
1151 
1152 	if (dev->rx_pkt_burst == atl_recv_pkts)
1153 		return ptypes;
1154 
1155 	return NULL;
1156 }
1157 
1158 static void
1159 atl_dev_delayed_handler(void *param)
1160 {
1161 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1162 
1163 	atl_dev_configure_macsec(dev);
1164 }
1165 
1166 
1167 /* return 0 means link status changed, -1 means not changed */
1168 static int
1169 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
1170 {
1171 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1172 	struct rte_eth_link link, old;
1173 	u32 fc = AQ_NIC_FC_OFF;
1174 	int err = 0;
1175 
1176 	link.link_status = ETH_LINK_DOWN;
1177 	link.link_speed = 0;
1178 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1179 	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1180 	memset(&old, 0, sizeof(old));
1181 
1182 	/* load old link status */
1183 	rte_eth_linkstatus_get(dev, &old);
1184 
1185 	/* read current link status */
1186 	err = hw->aq_fw_ops->update_link_status(hw);
1187 
1188 	if (err)
1189 		return 0;
1190 
1191 	if (hw->aq_link_status.mbps == 0) {
1192 		/* write default (down) link status */
1193 		rte_eth_linkstatus_set(dev, &link);
1194 		if (link.link_status == old.link_status)
1195 			return -1;
1196 		return 0;
1197 	}
1198 
1199 	link.link_status = ETH_LINK_UP;
1200 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1201 	link.link_speed = hw->aq_link_status.mbps;
1202 
1203 	rte_eth_linkstatus_set(dev, &link);
1204 
1205 	if (link.link_status == old.link_status)
1206 		return -1;
1207 
1208 	/* Driver has to update flow control settings on RX block
1209 	 * on any link event.
1210 	 * We should query FW whether it negotiated FC.
1211 	 */
1212 	if (hw->aq_fw_ops->get_flow_control) {
1213 		hw->aq_fw_ops->get_flow_control(hw, &fc);
1214 		hw_atl_b0_set_fc(hw, fc, 0U);
1215 	}
1216 
1217 	if (rte_eal_alarm_set(1000 * 1000,
1218 			      atl_dev_delayed_handler, (void *)dev) < 0)
1219 		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");
1220 
1221 	return 0;
1222 }
1223 
1224 static int
1225 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1226 {
1227 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1228 
1229 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1230 
1231 	return 0;
1232 }
1233 
1234 static int
1235 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1236 {
1237 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1238 
1239 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1240 
1241 	return 0;
1242 }
1243 
1244 static int
1245 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1246 {
1247 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1248 
1249 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1250 
1251 	return 0;
1252 }
1253 
1254 static int
1255 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1256 {
1257 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1258 
1259 	if (dev->data->promiscuous == 1)
1260 		return 0; /* must remain in all_multicast mode */
1261 
1262 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1263 
1264 	return 0;
1265 }
1266 
1267 /**
1268  * It clears the interrupt causes and enables the interrupt.
 * It is called only once, during NIC initialization.
1270  *
1271  * @param dev
1272  *  Pointer to struct rte_eth_dev.
1273  * @param on
1274  *  Enable or Disable.
1275  *
1276  * @return
1277  *  - On success, zero.
1278  *  - On failure, a negative value.
1279  */
1280 
1281 static int
1282 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
1283 {
1284 	atl_dev_link_status_print(dev);
1285 	return 0;
1286 }
1287 
1288 static int
1289 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
1290 {
1291 	return 0;
1292 }
1293 
1294 
1295 static int
1296 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1297 {
1298 	struct atl_interrupt *intr =
1299 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1300 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1301 	u64 cause = 0;
1302 
1303 	hw_atl_b0_hw_irq_read(hw, &cause);
1304 
1305 	atl_disable_intr(hw);
1306 
1307 	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
1308 		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
1309 
1310 	return 0;
1311 }
1312 
1313 /**
1314  * It gets and then prints the link status.
1315  *
1316  * @param dev
1317  *  Pointer to struct rte_eth_dev.
1322  */
1323 static void
1324 atl_dev_link_status_print(struct rte_eth_dev *dev)
1325 {
1326 	struct rte_eth_link link;
1327 
1328 	memset(&link, 0, sizeof(link));
1329 	rte_eth_linkstatus_get(dev, &link);
1330 	if (link.link_status) {
1331 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1332 					(int)(dev->data->port_id),
1333 					(unsigned int)link.link_speed,
1334 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1335 					"full-duplex" : "half-duplex");
1336 	} else {
		PMD_DRV_LOG(INFO, "Port %d: Link Down",
1338 				(int)(dev->data->port_id));
1339 	}
1340 
1341 
1342 #ifdef DEBUG
1343 {
1344 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1345 
1346 	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1347 				pci_dev->addr.domain,
1348 				pci_dev->addr.bus,
1349 				pci_dev->addr.devid,
1350 				pci_dev->addr.function);
1351 }
1352 #endif
1353 
	PMD_DRV_LOG(INFO, "Link speed: %u", link.link_speed);
1355 }
1356 
1357 /*
1358  * It executes link_update after knowing an interrupt occurred.
1359  *
1360  * @param dev
1361  *  Pointer to struct rte_eth_dev.
1362  *
1363  * @return
1364  *  - On success, zero.
1365  *  - On failure, a negative value.
1366  */
1367 static int
1368 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1369 			   struct rte_intr_handle *intr_handle)
1370 {
1371 	struct atl_interrupt *intr =
1372 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1373 	struct atl_adapter *adapter = dev->data->dev_private;
1374 	struct aq_hw_s *hw = &adapter->hw;
1375 
1376 	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
1377 		goto done;
1378 
1379 	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1380 
1381 	/* Notify userapp if link status changed */
1382 	if (!atl_dev_link_update(dev, 0)) {
1383 		atl_dev_link_status_print(dev);
1384 		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1385 	} else {
1386 		if (hw->aq_fw_ops->send_macsec_req == NULL)
1387 			goto done;
1388 
1389 		/* Check macsec Keys expired */
1390 		struct get_stats req = { 0 };
1391 		struct macsec_msg_fw_request msg = { 0 };
1392 		struct macsec_msg_fw_response resp = { 0 };
1393 
1394 		req.ingress_sa_index = 0x0;
1395 		req.egress_sc_index = 0x0;
1396 		req.egress_sa_index = 0x0;
1397 		msg.msg_type = macsec_get_stats_msg;
1398 		msg.stats = req;
1399 
1400 		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1401 		if (err) {
1402 			PMD_DRV_LOG(ERR, "send_macsec_req fail");
1403 			goto done;
1404 		}
1405 		if (resp.stats.egress_threshold_expired ||
1406 		    resp.stats.ingress_threshold_expired ||
1407 		    resp.stats.egress_expired ||
1408 		    resp.stats.ingress_expired) {
1409 			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
1410 			rte_eth_dev_callback_process(dev,
1411 				RTE_ETH_EVENT_MACSEC, NULL);
1412 		}
1413 	}
1414 done:
1415 	atl_enable_intr(dev);
1416 	rte_intr_ack(intr_handle);
1417 
1418 	return 0;
1419 }
1420 
1421 /**
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
1429  *
1430  * @return
1431  *  void
1432  */
1433 static void
1434 atl_dev_interrupt_handler(void *param)
1435 {
1436 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1437 
1438 	atl_dev_interrupt_get_status(dev);
1439 	atl_dev_interrupt_action(dev, dev->intr_handle);
1440 }
1441 
1442 
1443 static int
1444 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1445 {
1446 	return SFP_EEPROM_SIZE;
1447 }
1448 
1449 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1450 		       struct rte_dev_eeprom_info *eeprom)
1451 {
1452 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1453 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1454 
1455 	if (hw->aq_fw_ops->get_eeprom == NULL)
1456 		return -ENOTSUP;
1457 
1458 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1459 	    eeprom->data == NULL)
1460 		return -EINVAL;
1461 
1462 	if (eeprom->magic > 0x7F)
1463 		return -EINVAL;
1464 
1465 	if (eeprom->magic)
1466 		dev_addr = eeprom->magic;
1467 
1468 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1469 					 eeprom->length, eeprom->offset);
1470 }
1471 
1472 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1473 		       struct rte_dev_eeprom_info *eeprom)
1474 {
1475 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1476 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1477 
1478 	if (hw->aq_fw_ops->set_eeprom == NULL)
1479 		return -ENOTSUP;
1480 
1481 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1482 	    eeprom->data == NULL)
1483 		return -EINVAL;
1484 
1485 	if (eeprom->magic > 0x7F)
1486 		return -EINVAL;
1487 
1488 	if (eeprom->magic)
1489 		dev_addr = eeprom->magic;
1490 
1491 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1492 					 eeprom->length, eeprom->offset);
1493 }
1494 
1495 static int
1496 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1497 {
1498 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1499 	u32 mif_id;
1500 	int err;
1501 
1502 	if (regs->data == NULL) {
1503 		regs->length = hw_atl_utils_hw_get_reg_length();
1504 		regs->width = sizeof(u32);
1505 		return 0;
1506 	}
1507 
1508 	/* Only full register dump is supported */
1509 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1510 		return -ENOTSUP;
1511 
1512 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1513 
1514 	/* Device version */
1515 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1516 	regs->version = mif_id & 0xFFU;
1517 
1518 	return err;
1519 }
1520 
1521 static int
1522 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1523 {
1524 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1525 	u32 fc = AQ_NIC_FC_OFF;
1526 
1527 	if (hw->aq_fw_ops->get_flow_control == NULL)
1528 		return -ENOTSUP;
1529 
1530 	hw->aq_fw_ops->get_flow_control(hw, &fc);
1531 
1532 	if (fc == AQ_NIC_FC_OFF)
1533 		fc_conf->mode = RTE_FC_NONE;
1534 	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
1535 		fc_conf->mode = RTE_FC_FULL;
1536 	else if (fc & AQ_NIC_FC_RX)
1537 		fc_conf->mode = RTE_FC_RX_PAUSE;
1538 	else if (fc & AQ_NIC_FC_TX)
1539 		fc_conf->mode = RTE_FC_TX_PAUSE;
1540 
1541 	return 0;
1542 }
1543 
1544 static int
1545 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1546 {
1547 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1548 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1549 
1550 
1551 	if (hw->aq_fw_ops->set_flow_control == NULL)
1552 		return -ENOTSUP;
1553 
1554 	if (fc_conf->mode == RTE_FC_NONE)
1555 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1556 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1557 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1558 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1559 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1560 	else if (fc_conf->mode == RTE_FC_FULL)
1561 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1562 
1563 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1564 		return hw->aq_fw_ops->set_flow_control(hw);
1565 
1566 	return 0;
1567 }
1568 
1569 static int
1570 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1571 		    u8 *mac_addr, bool enable)
1572 {
1573 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1574 	unsigned int h = 0U;
1575 	unsigned int l = 0U;
1576 	int err;
1577 
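	/* Split the MAC into the 16-bit MSW (bytes 0-1) and the 32-bit
	 * LSW (bytes 2-5) expected by the L2 unicast filter registers.
	 */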
1578 	if (mac_addr) {
1579 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1580 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1581 			(mac_addr[4] << 8) | mac_addr[5];
1582 	}
1583 
1584 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1585 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1586 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1587 
1588 	if (enable)
1589 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1590 
1591 	err = aq_hw_err_from_flags(hw);
1592 
1593 	return err;
1594 }
1595 
1596 static int
1597 atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1598 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1599 {
1600 	if (rte_is_zero_ether_addr(mac_addr)) {
1601 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1602 		return -EINVAL;
1603 	}
1604 
1605 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1606 }
1607 
1608 static void
1609 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1610 {
1611 	atl_update_mac_addr(dev, index, NULL, false);
1612 }
1613 
1614 static int
1615 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
1616 {
1617 	atl_remove_mac_addr(dev, 0);
1618 	atl_add_mac_addr(dev, addr, 0, 0);
1619 	return 0;
1620 }
1621 
1622 static int
1623 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1624 {
1625 	struct rte_eth_dev_info dev_info;
1626 	int ret;
1627 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1628 
1629 	ret = atl_dev_info_get(dev, &dev_info);
1630 	if (ret != 0)
1631 		return ret;
1632 
1633 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1634 		return -EINVAL;
1635 
1636 	/* update max frame size */
1637 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1638 
1639 	return 0;
1640 }
1641 
1642 static int
1643 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1644 {
1645 	struct aq_hw_cfg_s *cfg =
1646 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1647 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1648 	int err = 0;
1649 	int i = 0;
1650 
1651 	PMD_INIT_FUNC_TRACE();
1652 
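	/* First pass: if vlan_id already occupies a filter slot, disable
	 * and clear that slot when the filter is being switched off.
	 */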
1653 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1654 		if (cfg->vlan_filter[i] == vlan_id) {
1655 			if (!on) {
1656 				/* Disable VLAN filter. */
1657 				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1658 
1659 				/* Clear VLAN filter entry */
1660 				cfg->vlan_filter[i] = 0;
1661 			}
1662 			break;
1663 		}
1664 	}
1665 
1666 	/* VLAN_ID was not found. So, nothing to delete. */
1667 	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1668 		goto exit;
1669 
	/* VLAN_ID already exists, or was already removed above. Nothing to do. */
1671 	if (i != HW_ATL_B0_MAX_VLAN_IDS)
1672 		goto exit;
1673 
	/* Try to find a free VLAN filter slot for the new VLAN_ID */
1675 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1676 		if (cfg->vlan_filter[i] == 0)
1677 			break;
1678 	}
1679 
1680 	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter slot to add the new VLAN_ID */
1682 		err = -ENOMEM;
1683 		goto exit;
1684 	}
1685 
1686 	cfg->vlan_filter[i] = vlan_id;
1687 	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1688 	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1689 	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1690 
1691 exit:
	/* Enable VLAN promiscuous mode if the VLAN filter table is empty */
1693 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1694 		if (cfg->vlan_filter[i] != 0)
1695 			break;
1696 	}
1697 
1698 	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1699 
1700 	return err;
1701 }
1702 
1703 static int
1704 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1705 {
1706 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1707 	struct aq_hw_cfg_s *cfg =
1708 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1709 	int i;
1710 
1711 	PMD_INIT_FUNC_TRACE();
1712 
1713 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1714 		if (cfg->vlan_filter[i])
1715 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1716 	}
1717 	return 0;
1718 }
1719 
1720 static int
1721 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1722 {
1723 	struct aq_hw_cfg_s *cfg =
1724 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1725 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1726 	int ret = 0;
1727 	int i;
1728 
1729 	PMD_INIT_FUNC_TRACE();
1730 
1731 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1732 
1733 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1734 
1735 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1736 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1737 
1738 	if (mask & ETH_VLAN_EXTEND_MASK)
1739 		ret = -ENOTSUP;
1740 
1741 	return ret;
1742 }
1743 
1744 static int
1745 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1746 		  uint16_t tpid)
1747 {
1748 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1749 	int err = 0;
1750 
1751 	PMD_INIT_FUNC_TRACE();
1752 
1753 	switch (vlan_type) {
1754 	case ETH_VLAN_TYPE_INNER:
1755 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1756 		break;
1757 	case ETH_VLAN_TYPE_OUTER:
1758 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1759 		break;
1760 	default:
1761 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1762 		err = -ENOTSUP;
1763 	}
1764 
1765 	return err;
1766 }
1767 
1768 static void
1769 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1770 {
1771 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1772 
1773 	PMD_INIT_FUNC_TRACE();
1774 
	if (queue_id >= dev->data->nb_rx_queues) {
1776 		PMD_DRV_LOG(ERR, "Invalid queue id");
1777 		return;
1778 	}
1779 
1780 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1781 }
1782 
1783 static int
1784 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1785 			  struct rte_ether_addr *mc_addr_set,
1786 			  uint32_t nb_mc_addr)
1787 {
1788 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1789 	u32 i;
1790 
1791 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1792 		return -EINVAL;
1793 
	/* Program the multicast addresses into the tail of the unicast
	 * filter table, starting at index HW_ATL_B0_MAC_MIN.
	 */
1795 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1796 		u8 *mac_addr = NULL;
1797 		u32 l = 0, h = 0;
1798 
1799 		if (i < nb_mc_addr) {
1800 			mac_addr = mc_addr_set[i].addr_bytes;
1801 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1802 				(mac_addr[4] << 8) | mac_addr[5];
1803 			h = (mac_addr[0] << 8) | mac_addr[1];
1804 		}
1805 
1806 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1807 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1808 							HW_ATL_B0_MAC_MIN + i);
1809 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1810 							HW_ATL_B0_MAC_MIN + i);
1811 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1812 					   HW_ATL_B0_MAC_MIN + i);
1813 	}
1814 
1815 	return 0;
1816 }
1817 
1818 static int
1819 atl_reta_update(struct rte_eth_dev *dev,
1820 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1821 		   uint16_t reta_size)
1822 {
1823 	int i;
1824 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1825 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1826 
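	/* Clamp each entry to a valid RX queue index before programming HW */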
1827 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1828 		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1829 					dev->data->nb_rx_queues - 1);
1830 
1831 	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1832 	return 0;
1833 }
1834 
1835 static int
1836 atl_reta_query(struct rte_eth_dev *dev,
1837 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1838 		    uint16_t reta_size)
1839 {
1840 	int i;
1841 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1842 
1843 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1844 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1845 	reta_conf->mask = ~0U;
1846 	return 0;
1847 }
1848 
1849 static int
1850 atl_rss_hash_update(struct rte_eth_dev *dev,
1851 				 struct rte_eth_rss_conf *rss_conf)
1852 {
1853 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1854 	struct aq_hw_cfg_s *cfg =
1855 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
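	/* Default 40-byte RSS hash key, used when the application does
	 * not supply one.
	 */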
1856 	static u8 def_rss_key[40] = {
1857 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1858 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1859 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1860 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1861 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1862 	};
1863 
1864 	cfg->is_rss = !!rss_conf->rss_hf;
1865 	if (rss_conf->rss_key) {
1866 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1867 		       rss_conf->rss_key_len);
1868 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1869 	} else {
1870 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1871 		       sizeof(def_rss_key));
1872 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1873 	}
1874 
1875 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1876 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1877 	return 0;
1878 }
1879 
1880 static int
1881 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1882 				 struct rte_eth_rss_conf *rss_conf)
1883 {
1884 	struct aq_hw_cfg_s *cfg =
1885 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1886 
1887 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1888 	if (rss_conf->rss_key) {
1889 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1890 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1891 		       rss_conf->rss_key_len);
1892 	}
1893 
1894 	return 0;
1895 }
1896 
1897 static bool
1898 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1899 {
1900 	if (strcmp(dev->device->driver->name, drv->driver.name))
1901 		return false;
1902 
1903 	return true;
1904 }
1905 
1906 bool
1907 is_atlantic_supported(struct rte_eth_dev *dev)
1908 {
1909 	return is_device_supported(dev, &rte_atl_pmd);
1910 }
1911 
1912 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1913 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1914 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1915 RTE_LOG_REGISTER(atl_logtype_init, pmd.net.atlantic.init, NOTICE);
1916 RTE_LOG_REGISTER(atl_logtype_driver, pmd.net.atlantic.driver, NOTICE);
1917