xref: /dpdk/drivers/net/atlantic/atl_ethdev.c (revision 200bc52e5aa0d72e70464c9cd22b55cf536ed13c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_alarm.h>
8 
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
12 #include "atl_logs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
16 
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
19 
20 static int  atl_dev_configure(struct rte_eth_dev *dev);
21 static int  atl_dev_start(struct rte_eth_dev *dev);
22 static void atl_dev_stop(struct rte_eth_dev *dev);
23 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
24 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
25 static void atl_dev_close(struct rte_eth_dev *dev);
26 static int  atl_dev_reset(struct rte_eth_dev *dev);
27 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
28 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
29 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
30 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
31 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
32 
33 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
34 				    struct rte_eth_xstat_name *xstats_names,
35 				    unsigned int size);
36 
37 static int atl_dev_stats_get(struct rte_eth_dev *dev,
38 				struct rte_eth_stats *stats);
39 
40 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
41 			      struct rte_eth_xstat *stats, unsigned int n);
42 
43 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
44 
45 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 			      size_t fw_size);
47 
48 static void atl_dev_info_get(struct rte_eth_dev *dev,
49 			       struct rte_eth_dev_info *dev_info);
50 
51 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
52 
53 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
54 
55 /* VLAN stuff */
56 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
57 		uint16_t vlan_id, int on);
58 
59 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
60 
61 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
62 				     uint16_t queue_id, int on);
63 
64 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
65 			     enum rte_vlan_type vlan_type, uint16_t tpid);
66 
67 /* EEPROM */
68 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
69 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
70 			      struct rte_dev_eeprom_info *eeprom);
71 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
72 			      struct rte_dev_eeprom_info *eeprom);
73 
74 /* Regs */
75 static int atl_dev_get_regs(struct rte_eth_dev *dev,
76 			    struct rte_dev_reg_info *regs);
77 
78 /* Flow control */
79 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
80 			       struct rte_eth_fc_conf *fc_conf);
81 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
82 			       struct rte_eth_fc_conf *fc_conf);
83 
84 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
85 
86 /* Interrupts */
87 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
88 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
89 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
90 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
91 				    struct rte_intr_handle *handle);
92 static void atl_dev_interrupt_handler(void *param);
93 
94 
95 static int atl_add_mac_addr(struct rte_eth_dev *dev,
96 			    struct rte_ether_addr *mac_addr,
97 			    uint32_t index, uint32_t pool);
98 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
99 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
100 					   struct rte_ether_addr *mac_addr);
101 
102 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
103 				    struct rte_ether_addr *mc_addr_set,
104 				    uint32_t nb_mc_addr);
105 
106 /* RSS */
107 static int atl_reta_update(struct rte_eth_dev *dev,
108 			     struct rte_eth_rss_reta_entry64 *reta_conf,
109 			     uint16_t reta_size);
110 static int atl_reta_query(struct rte_eth_dev *dev,
111 			    struct rte_eth_rss_reta_entry64 *reta_conf,
112 			    uint16_t reta_size);
113 static int atl_rss_hash_update(struct rte_eth_dev *dev,
114 				 struct rte_eth_rss_conf *rss_conf);
115 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
116 				   struct rte_eth_rss_conf *rss_conf);
117 
118 
119 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
120 	struct rte_pci_device *pci_dev);
121 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
122 
123 static void atl_dev_info_get(struct rte_eth_dev *dev,
124 				struct rte_eth_dev_info *dev_info);
125 
/* Log type IDs registered with the EAL logging subsystem (init/driver) */
int atl_logtype_init;
int atl_logtype_driver;
128 
/*
 * The set of PCI devices this driver supports.
 * Grouped by family: D-series, AQC1xx, AQC1xxS, AQC1xxE variants.
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};
157 
/* PCI driver registration: BAR mapping, link-state-change interrupt
 * support and IOVA-as-VA mode are requested from the PCI bus layer.
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
165 
/* Rx offload capabilities advertised in dev_infos_get */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

/* Tx offload capabilities advertised in dev_infos_get */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

/* Size in bytes reported/used for the SFP module EEPROM */
#define SFP_EEPROM_SIZE 0x100
183 
/* Descriptor ring limits reported to applications for Rx queues */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

/* Descriptor ring limits for Tx queues, incl. per-packet segment caps */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
197 
/* Which statistics block an xstats entry is read from */
enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,	/* MAC/MSM counters (struct aq_stats_s) */
	XSTATS_TYPE_MACSEC,	/* MACsec counters (struct macsec_stats) */
};

/* Table entry for an MSM counter: name + byte offset into aq_stats_s */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

/* Table entry for a MACsec counter: name + byte offset into macsec_stats */
#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

/* One row of the extended-statistics lookup table */
struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};
220 
/*
 * Extended statistics exposed through xstats_get/xstats_get_names.
 * MSM counters come first, followed by the MACsec counter groups.
 */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};
269 
/* ethdev callback table wired into every atlantic port at init time.
 * Note: stats_reset and xstats_reset intentionally share one handler.
 */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
349 
/* Reset the adapter via the B0 hardware layer; returns its status code. */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
355 
/* Unmask all interrupt causes (lower status word) for this port. */
static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}
363 
/* Mask all interrupt causes (lower status word) for this adapter. */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
370 
371 static int
372 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
373 {
374 	struct atl_adapter *adapter =
375 		(struct atl_adapter *)eth_dev->data->dev_private;
376 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
377 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
378 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
379 	int err = 0;
380 
381 	PMD_INIT_FUNC_TRACE();
382 
383 	eth_dev->dev_ops = &atl_eth_dev_ops;
384 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
385 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
386 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
387 
388 	/* For secondary processes, the primary process has done all the work */
389 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
390 		return 0;
391 
392 	/* Vendor and Device ID need to be set before init of shared code */
393 	hw->device_id = pci_dev->id.device_id;
394 	hw->vendor_id = pci_dev->id.vendor_id;
395 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
396 
397 	/* Hardware configuration - hardcode */
398 	adapter->hw_cfg.is_lro = false;
399 	adapter->hw_cfg.wol = false;
400 	adapter->hw_cfg.is_rss = false;
401 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
402 
403 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
404 			  AQ_NIC_RATE_5G |
405 			  AQ_NIC_RATE_2G5 |
406 			  AQ_NIC_RATE_1G |
407 			  AQ_NIC_RATE_100M;
408 
409 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
410 	adapter->hw_cfg.aq_rss.indirection_table_size =
411 		HW_ATL_B0_RSS_REDIRECTION_MAX;
412 
413 	hw->aq_nic_cfg = &adapter->hw_cfg;
414 
415 	/* disable interrupt */
416 	atl_disable_intr(hw);
417 
418 	/* Allocate memory for storing MAC addresses */
419 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
420 					RTE_ETHER_ADDR_LEN, 0);
421 	if (eth_dev->data->mac_addrs == NULL) {
422 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
423 		return -ENOMEM;
424 	}
425 
426 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
427 	if (err)
428 		return err;
429 
430 	/* Copy the permanent MAC address */
431 	if (hw->aq_fw_ops->get_mac_permanent(hw,
432 			eth_dev->data->mac_addrs->addr_bytes) != 0)
433 		return -EINVAL;
434 
435 	/* Reset the hw statistics */
436 	atl_dev_stats_reset(eth_dev);
437 
438 	rte_intr_callback_register(intr_handle,
439 				   atl_dev_interrupt_handler, eth_dev);
440 
441 	/* enable uio/vfio intr/eventfd mapping */
442 	rte_intr_enable(intr_handle);
443 
444 	/* enable support intr */
445 	atl_enable_intr(eth_dev);
446 
447 	return err;
448 }
449 
/*
 * Per-port teardown called from the PCI remove path (primary process only).
 * Closes the device if still running, clears the burst/ops pointers,
 * disables and unregisters the interrupt handler, and frees the MAC array.
 */
static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	/* Only the primary process owns the hardware */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* Stop + free queues if the app did not call dev_close itself */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
481 
/* PCI probe: allocate an ethdev with atl_adapter private data and init it. */
static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}
489 
/* PCI remove: uninit the port and release the ethdev. */
static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}
495 
496 static int
497 atl_dev_configure(struct rte_eth_dev *dev)
498 {
499 	struct atl_interrupt *intr =
500 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
501 
502 	PMD_INIT_FUNC_TRACE();
503 
504 	/* set flag to update link status after init */
505 	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
506 
507 	return 0;
508 }
509 
510 /*
511  * Configure device link speed and setup link.
512  * It returns 0 on success.
513  */
514 static int
515 atl_dev_start(struct rte_eth_dev *dev)
516 {
517 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
518 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
519 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
520 	uint32_t intr_vector = 0;
521 	int status;
522 	int err;
523 
524 	PMD_INIT_FUNC_TRACE();
525 
526 	/* set adapter started */
527 	hw->adapter_stopped = 0;
528 
529 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
530 		PMD_INIT_LOG(ERR,
531 		"Invalid link_speeds for port %u, fix speed not supported",
532 				dev->data->port_id);
533 		return -EINVAL;
534 	}
535 
536 	/* disable uio/vfio intr/eventfd mapping */
537 	rte_intr_disable(intr_handle);
538 
539 	/* reinitialize adapter
540 	 * this calls reset and start
541 	 */
542 	status = atl_reset_hw(hw);
543 	if (status != 0)
544 		return -EIO;
545 
546 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
547 
548 	hw_atl_b0_hw_start(hw);
549 	/* check and configure queue intr-vector mapping */
550 	if ((rte_intr_cap_multiple(intr_handle) ||
551 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
552 	    dev->data->dev_conf.intr_conf.rxq != 0) {
553 		intr_vector = dev->data->nb_rx_queues;
554 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
555 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
556 					ATL_MAX_INTR_QUEUE_NUM);
557 			return -ENOTSUP;
558 		}
559 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
560 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
561 			return -1;
562 		}
563 	}
564 
565 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
566 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
567 				    dev->data->nb_rx_queues * sizeof(int), 0);
568 		if (intr_handle->intr_vec == NULL) {
569 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
570 				     " intr_vec", dev->data->nb_rx_queues);
571 			return -ENOMEM;
572 		}
573 	}
574 
575 	/* initialize transmission unit */
576 	atl_tx_init(dev);
577 
578 	/* This can fail when allocating mbufs for descriptor rings */
579 	err = atl_rx_init(dev);
580 	if (err) {
581 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
582 		goto error;
583 	}
584 
585 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
586 		hw->fw_ver_actual >> 24,
587 		(hw->fw_ver_actual >> 16) & 0xFF,
588 		hw->fw_ver_actual & 0xFFFF);
589 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
590 
591 	err = atl_start_queues(dev);
592 	if (err < 0) {
593 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
594 		goto error;
595 	}
596 
597 	err = atl_dev_set_link_up(dev);
598 
599 	err = hw->aq_fw_ops->update_link_status(hw);
600 
601 	if (err)
602 		goto error;
603 
604 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
605 
606 	if (rte_intr_allow_others(intr_handle)) {
607 		/* check if lsc interrupt is enabled */
608 		if (dev->data->dev_conf.intr_conf.lsc != 0)
609 			atl_dev_lsc_interrupt_setup(dev, true);
610 		else
611 			atl_dev_lsc_interrupt_setup(dev, false);
612 	} else {
613 		rte_intr_callback_unregister(intr_handle,
614 					     atl_dev_interrupt_handler, dev);
615 		if (dev->data->dev_conf.intr_conf.lsc != 0)
616 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
617 				     " no intr multiplex");
618 	}
619 
620 	/* check if rxq interrupt is enabled */
621 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
622 	    rte_intr_dp_is_en(intr_handle))
623 		atl_dev_rxq_interrupt_setup(dev);
624 
625 	/* enable uio/vfio intr/eventfd mapping */
626 	rte_intr_enable(intr_handle);
627 
628 	/* resume enabled intr since hw reset */
629 	atl_enable_intr(dev);
630 
631 	return 0;
632 
633 error:
634 	atl_stop_queues(dev);
635 	return -EIO;
636 }
637 
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 * Masks interrupts, resets the NIC, stops queues, clears recorded link
 * state, and releases the per-queue interrupt vector mapping.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	/* dev_start unregisters the handler when intr multiplexing is
	 * unavailable; restore it here so the next start sees the default.
	 */
	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
682 
683 /*
684  * Set device link up: enable tx.
685  */
686 static int
687 atl_dev_set_link_up(struct rte_eth_dev *dev)
688 {
689 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
690 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
691 	uint32_t speed_mask = 0;
692 
693 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
694 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
695 	} else {
696 		if (link_speeds & ETH_LINK_SPEED_10G)
697 			speed_mask |= AQ_NIC_RATE_10G;
698 		if (link_speeds & ETH_LINK_SPEED_5G)
699 			speed_mask |= AQ_NIC_RATE_5G;
700 		if (link_speeds & ETH_LINK_SPEED_1G)
701 			speed_mask |= AQ_NIC_RATE_1G;
702 		if (link_speeds & ETH_LINK_SPEED_2_5G)
703 			speed_mask |=  AQ_NIC_RATE_2G5;
704 		if (link_speeds & ETH_LINK_SPEED_100M)
705 			speed_mask |= AQ_NIC_RATE_100M;
706 	}
707 
708 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
709 }
710 
/*
 * Set device link down: request an empty speed mask from firmware,
 * which takes the link down.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}
721 
/*
 * Reset and stop device, then release all Rx/Tx queue resources.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}
734 
/* dev_reset callback: full uninit followed by a fresh init of the port. */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int err = eth_atl_dev_uninit(dev);

	if (err != 0)
		return err;

	return eth_atl_dev_init(dev);
}
748 
/*
 * Push the cached MACsec configuration to firmware as a sequence of
 * request messages: global config, TX SC, RX SC, TX SA, RX SA.
 * No-op when MACsec is disabled or the FW lacks the request channel.
 * Returns 0 on success, -1 on the first FW-reported failure.
 */
static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Creating set of sc/sa structures from parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* TXSC always one (??) */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX; stored big-endian, hence the byte swaps */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index which currently used */

	/*
	 * Creating SCI (Secure Channel Identifier).
	 * SCI constructed from Source MAC and Port identifier
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	/* NOTE(review): full-match mask 0x3f is commented out here --
	 * confirm DA matching is intentionally disabled.
	 */
	msg_macsec.rxsc.da_mask = 0;

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA (comment previously said "RX SC" in error) */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	/* 128-bit key, word-reversed and byte-swapped for the FW */
	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}
871 
872 int atl_macsec_enable(struct rte_eth_dev *dev,
873 		      uint8_t encr, uint8_t repl_prot)
874 {
875 	struct aq_hw_cfg_s *cfg =
876 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
877 
878 	cfg->aq_macsec.common.macsec_enabled = 1;
879 	cfg->aq_macsec.common.encryption_enabled = encr;
880 	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
881 
882 	return 0;
883 }
884 
885 int atl_macsec_disable(struct rte_eth_dev *dev)
886 {
887 	struct aq_hw_cfg_s *cfg =
888 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
889 
890 	cfg->aq_macsec.common.macsec_enabled = 0;
891 
892 	return 0;
893 }
894 
895 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
896 {
897 	struct aq_hw_cfg_s *cfg =
898 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
899 
900 	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
901 	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
902 		RTE_ETHER_ADDR_LEN);
903 
904 	return 0;
905 }
906 
907 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
908 			   uint8_t *mac, uint16_t pi)
909 {
910 	struct aq_hw_cfg_s *cfg =
911 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
912 
913 	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
914 	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
915 		RTE_ETHER_ADDR_LEN);
916 	cfg->aq_macsec.rxsc.pi = pi;
917 
918 	return 0;
919 }
920 
921 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
922 			   uint8_t idx, uint8_t an,
923 			   uint32_t pn, uint8_t *key)
924 {
925 	struct aq_hw_cfg_s *cfg =
926 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
927 
928 	cfg->aq_macsec.txsa.idx = idx;
929 	cfg->aq_macsec.txsa.pn = pn;
930 	cfg->aq_macsec.txsa.an = an;
931 
932 	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
933 	return 0;
934 }
935 
936 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
937 			   uint8_t idx, uint8_t an,
938 			   uint32_t pn, uint8_t *key)
939 {
940 	struct aq_hw_cfg_s *cfg =
941 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
942 
943 	cfg->aq_macsec.rxsa.idx = idx;
944 	cfg->aq_macsec.rxsa.pn = pn;
945 	cfg->aq_macsec.rxsa.an = an;
946 
947 	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
948 	return 0;
949 }
950 
/*
 * stats_get callback: refresh counters from firmware, then fill the
 * rte_eth_stats structure from the HW totals plus software-maintained
 * per-queue counters. Always returns 0.
 */
static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	/* Allocation failures are tracked in software by the Rx path */
	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}
982 
/*
 * stats_reset/xstats_reset callback: pull the latest counters from
 * firmware, then zero both the cached HW totals and the software
 * per-queue counters.
 */
static void
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
}
996 
997 static int
998 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
999 			 struct rte_eth_xstat_name *xstats_names,
1000 			 unsigned int size)
1001 {
1002 	unsigned int i;
1003 
1004 	if (!xstats_names)
1005 		return RTE_DIM(atl_xstats_tbl);
1006 
1007 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
1008 		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
1009 			RTE_ETH_XSTATS_NAME_SIZE);
1010 
1011 	return i;
1012 }
1013 
/* .xstats_get ethdev op: fill 'stats' with up to 'n' extended statistics.
 * MSM entries are read from the cached firmware counters; MACsec entries
 * are fetched from firmware via a mailbox request (when supported).
 * Returns the number of entries written.
 */
static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter =
	(struct atl_adapter *)dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;

	if (!stats)
		return 0;

	/* Query aggregate MACsec counters from firmware up front so the
	 * loop below can copy them out.  0xff selects "all" indices --
	 * TODO confirm against firmware interface docs.
	 */
	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			/* Value lives at a byte offset inside curr_stats. */
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			/* NOTE(review): if the MACsec request failed (or is
			 * unsupported), the loop is aborted at the FIRST
			 * MACsec entry, truncating any entries after it --
			 * confirm this is intended.
			 */
			if (err)
				goto done;
			stats[i].value = *(u64 *)((uint8_t *)&resp.stats +
					 atl_xstats_tbl[i].offset);
			break;
		}
	}
done:
	return i;
}
1060 
1061 static int
1062 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1063 {
1064 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1065 	uint32_t fw_ver = 0;
1066 	unsigned int ret = 0;
1067 
1068 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1069 	if (ret)
1070 		return -EIO;
1071 
1072 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1073 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1074 
1075 	ret += 1; /* add string null-terminator */
1076 
1077 	if (fw_size < ret)
1078 		return ret;
1079 
1080 	return 0;
1081 }
1082 
1083 static void
1084 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1085 {
1086 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1087 
1088 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1089 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1090 
1091 	dev_info->min_rx_bufsize = 1024;
1092 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1093 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1094 	dev_info->max_vfs = pci_dev->max_vfs;
1095 
1096 	dev_info->max_hash_mac_addrs = 0;
1097 	dev_info->max_vmdq_pools = 0;
1098 	dev_info->vmdq_queue_num = 0;
1099 
1100 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1101 
1102 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1103 
1104 
1105 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1106 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1107 	};
1108 
1109 	dev_info->default_txconf = (struct rte_eth_txconf) {
1110 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1111 	};
1112 
1113 	dev_info->rx_desc_lim = rx_desc_lim;
1114 	dev_info->tx_desc_lim = tx_desc_lim;
1115 
1116 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1117 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1118 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1119 
1120 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1121 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1122 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1123 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
1124 }
1125 
1126 static const uint32_t *
1127 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1128 {
1129 	static const uint32_t ptypes[] = {
1130 		RTE_PTYPE_L2_ETHER,
1131 		RTE_PTYPE_L2_ETHER_ARP,
1132 		RTE_PTYPE_L2_ETHER_VLAN,
1133 		RTE_PTYPE_L3_IPV4,
1134 		RTE_PTYPE_L3_IPV6,
1135 		RTE_PTYPE_L4_TCP,
1136 		RTE_PTYPE_L4_UDP,
1137 		RTE_PTYPE_L4_SCTP,
1138 		RTE_PTYPE_L4_ICMP,
1139 		RTE_PTYPE_UNKNOWN
1140 	};
1141 
1142 	if (dev->rx_pkt_burst == atl_recv_pkts)
1143 		return ptypes;
1144 
1145 	return NULL;
1146 }
1147 
/* Alarm callback (scheduled from the link-update path): re-apply the
 * MACsec configuration after a link event.
 */
static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *eth_dev = param;

	atl_dev_configure_macsec(eth_dev);
}
1155 
1156 
/* .link_update ethdev op.
 * Return 0 when the link status changed, -1 when it did not change.
 * On a change it also re-applies flow control and schedules the delayed
 * MACsec reconfiguration.
 */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	/* Start from a "link down" template. */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	/* NOTE(review): a firmware error is reported as "changed" (0),
	 * leaving the previously stored status in place -- confirm intended.
	 */
	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	/* Re-apply MACsec configuration 1 second from now, off the
	 * interrupt path.
	 */
	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}
1213 
1214 static void
1215 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1216 {
1217 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1218 
1219 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1220 }
1221 
1222 static void
1223 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1224 {
1225 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1226 
1227 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1228 }
1229 
1230 static void
1231 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1232 {
1233 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1234 
1235 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1236 }
1237 
1238 static void
1239 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1240 {
1241 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1242 
1243 	if (dev->data->promiscuous == 1)
1244 		return; /* must remain in all_multicast mode */
1245 
1246 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1247 }
1248 
1249 /**
1250  * It clears the interrupt causes and enables the interrupt.
1251  * It will be called once only during nic initialized.
1252  *
1253  * @param dev
1254  *  Pointer to struct rte_eth_dev.
1255  * @param on
1256  *  Enable or Disable.
1257  *
1258  * @return
1259  *  - On success, zero.
1260  *  - On failure, a negative value.
1261  */
1262 
1263 static int
1264 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
1265 {
1266 	atl_dev_link_status_print(dev);
1267 	return 0;
1268 }
1269 
1270 static int
1271 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
1272 {
1273 	return 0;
1274 }
1275 
1276 
1277 static int
1278 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1279 {
1280 	struct atl_interrupt *intr =
1281 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1282 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1283 	u64 cause = 0;
1284 
1285 	hw_atl_b0_hw_irq_read(hw, &cause);
1286 
1287 	atl_disable_intr(hw);
1288 
1289 	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
1290 		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
1291 
1292 	return 0;
1293 }
1294 
1295 /**
1296  * It gets and then prints the link status.
1297  *
1298  * @param dev
1299  *  Pointer to struct rte_eth_dev.
1300  *
1301  * @return
1302  *  - On success, zero.
1303  *  - On failure, a negative value.
1304  */
1305 static void
1306 atl_dev_link_status_print(struct rte_eth_dev *dev)
1307 {
1308 	struct rte_eth_link link;
1309 
1310 	memset(&link, 0, sizeof(link));
1311 	rte_eth_linkstatus_get(dev, &link);
1312 	if (link.link_status) {
1313 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1314 					(int)(dev->data->port_id),
1315 					(unsigned int)link.link_speed,
1316 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1317 					"full-duplex" : "half-duplex");
1318 	} else {
1319 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
1320 				(int)(dev->data->port_id));
1321 	}
1322 
1323 
1324 #ifdef DEBUG
1325 {
1326 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1327 
1328 	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1329 				pci_dev->addr.domain,
1330 				pci_dev->addr.bus,
1331 				pci_dev->addr.devid,
1332 				pci_dev->addr.function);
1333 }
1334 #endif
1335 
1336 	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1337 }
1338 
1339 /*
1340  * It executes link_update after knowing an interrupt occurred.
1341  *
1342  * @param dev
1343  *  Pointer to struct rte_eth_dev.
1344  *
1345  * @return
1346  *  - On success, zero.
1347  *  - On failure, a negative value.
1348  */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	/* Only act when atl_dev_interrupt_get_status() flagged a pending
	 * link event; otherwise just re-enable interrupts.
	 */
	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		/* Link unchanged: use the event as an opportunity to poll
		 * firmware for MACsec key expiry.
		 */
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check macsec Keys expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req fail");
			goto done;
		}
		/* Raise a MACsec event if any key/threshold expired. */
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			_rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	/* Re-arm device and host interrupts (masked in get_status). */
	atl_enable_intr(dev);
	rte_intr_enable(intr_handle);

	return 0;
}
1404 
1405 /**
1406  * Interrupt handler triggered by NIC  for handling
1407  * specific interrupt.
1408  *
1409  * @param handle
1410  *  Pointer to interrupt handle.
1411  * @param param
1412  *  The address of parameter (struct rte_eth_dev *) regsitered before.
1413  *
1414  * @return
1415  *  void
1416  */
1417 static void
1418 atl_dev_interrupt_handler(void *param)
1419 {
1420 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1421 
1422 	atl_dev_interrupt_get_status(dev);
1423 	atl_dev_interrupt_action(dev, dev->intr_handle);
1424 }
1425 
1426 
1427 static int
1428 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1429 {
1430 	return SFP_EEPROM_SIZE;
1431 }
1432 
1433 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1434 		       struct rte_dev_eeprom_info *eeprom)
1435 {
1436 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1437 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1438 
1439 	if (hw->aq_fw_ops->get_eeprom == NULL)
1440 		return -ENOTSUP;
1441 
1442 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1443 	    eeprom->data == NULL)
1444 		return -EINVAL;
1445 
1446 	if (eeprom->magic > 0x7F)
1447 		return -EINVAL;
1448 
1449 	if (eeprom->magic)
1450 		dev_addr = eeprom->magic;
1451 
1452 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1453 					 eeprom->length, eeprom->offset);
1454 }
1455 
1456 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1457 		       struct rte_dev_eeprom_info *eeprom)
1458 {
1459 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1460 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1461 
1462 	if (hw->aq_fw_ops->set_eeprom == NULL)
1463 		return -ENOTSUP;
1464 
1465 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1466 	    eeprom->data == NULL)
1467 		return -EINVAL;
1468 
1469 	if (eeprom->magic > 0x7F)
1470 		return -EINVAL;
1471 
1472 	if (eeprom->magic)
1473 		dev_addr = eeprom->magic;
1474 
1475 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1476 					 eeprom->length, eeprom->offset);
1477 }
1478 
1479 static int
1480 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1481 {
1482 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1483 	u32 mif_id;
1484 	int err;
1485 
1486 	if (regs->data == NULL) {
1487 		regs->length = hw_atl_utils_hw_get_reg_length();
1488 		regs->width = sizeof(u32);
1489 		return 0;
1490 	}
1491 
1492 	/* Only full register dump is supported */
1493 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1494 		return -ENOTSUP;
1495 
1496 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1497 
1498 	/* Device version */
1499 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1500 	regs->version = mif_id & 0xFFU;
1501 
1502 	return err;
1503 }
1504 
1505 static int
1506 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1507 {
1508 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1509 	u32 fc = AQ_NIC_FC_OFF;
1510 
1511 	if (hw->aq_fw_ops->get_flow_control == NULL)
1512 		return -ENOTSUP;
1513 
1514 	hw->aq_fw_ops->get_flow_control(hw, &fc);
1515 
1516 	if (fc == AQ_NIC_FC_OFF)
1517 		fc_conf->mode = RTE_FC_NONE;
1518 	else if (fc & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1519 		fc_conf->mode = RTE_FC_FULL;
1520 	else if (fc & AQ_NIC_FC_RX)
1521 		fc_conf->mode = RTE_FC_RX_PAUSE;
1522 	else if (fc & AQ_NIC_FC_RX)
1523 		fc_conf->mode = RTE_FC_TX_PAUSE;
1524 
1525 	return 0;
1526 }
1527 
1528 static int
1529 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1530 {
1531 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1532 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1533 
1534 
1535 	if (hw->aq_fw_ops->set_flow_control == NULL)
1536 		return -ENOTSUP;
1537 
1538 	if (fc_conf->mode == RTE_FC_NONE)
1539 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1540 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1541 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1542 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1543 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1544 	else if (fc_conf->mode == RTE_FC_FULL)
1545 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1546 
1547 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1548 		return hw->aq_fw_ops->set_flow_control(hw);
1549 
1550 	return 0;
1551 }
1552 
1553 static int
1554 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1555 		    u8 *mac_addr, bool enable)
1556 {
1557 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1558 	unsigned int h = 0U;
1559 	unsigned int l = 0U;
1560 	int err;
1561 
1562 	if (mac_addr) {
1563 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1564 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1565 			(mac_addr[4] << 8) | mac_addr[5];
1566 	}
1567 
1568 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1569 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1570 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1571 
1572 	if (enable)
1573 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1574 
1575 	err = aq_hw_err_from_flags(hw);
1576 
1577 	return err;
1578 }
1579 
1580 static int
1581 atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1582 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1583 {
1584 	if (rte_is_zero_ether_addr(mac_addr)) {
1585 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1586 		return -EINVAL;
1587 	}
1588 
1589 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1590 }
1591 
/* .mac_addr_remove ethdev op: clear and disable the filter at 'index'. */
static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}
1597 
/* .mac_addr_set ethdev op: replace the default (index 0) MAC address.
 * Returns 0 on success or a negative errno from the filter update.
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	/* Fix: propagate the add result instead of always returning 0. */
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1605 
1606 static int
1607 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1608 {
1609 	struct rte_eth_dev_info dev_info;
1610 	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1611 
1612 	atl_dev_info_get(dev, &dev_info);
1613 
1614 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1615 		return -EINVAL;
1616 
1617 	/* update max frame size */
1618 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1619 
1620 	return 0;
1621 }
1622 
/* .vlan_filter_set ethdev op: add (on != 0) or remove (on == 0) a VLAN ID
 * from the hardware VLAN filter table.  A VLAN ID of 0 in cfg->vlan_filter
 * marks a free slot.  When the table ends up empty, VLAN promiscuous mode
 * is enabled so traffic still passes.
 */
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Look up vlan_id in the currently programmed filter slots. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exists, or was just removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter slot to add the new VLAN_ID. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID*/
		err = -ENOMEM;
		goto exit;
	}

	/* Program the new entry: record it, set action, ID, then enable. */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter empty  */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
1683 
1684 static int
1685 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1686 {
1687 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1688 	struct aq_hw_cfg_s *cfg =
1689 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1690 	int i;
1691 
1692 	PMD_INIT_FUNC_TRACE();
1693 
1694 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1695 		if (cfg->vlan_filter[i])
1696 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1697 	}
1698 	return 0;
1699 }
1700 
1701 static int
1702 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1703 {
1704 	struct aq_hw_cfg_s *cfg =
1705 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1706 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1707 	int ret = 0;
1708 	int i;
1709 
1710 	PMD_INIT_FUNC_TRACE();
1711 
1712 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1713 
1714 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1715 
1716 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1717 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1718 
1719 	if (mask & ETH_VLAN_EXTEND_MASK)
1720 		ret = -ENOTSUP;
1721 
1722 	return ret;
1723 }
1724 
1725 static int
1726 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1727 		  uint16_t tpid)
1728 {
1729 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1730 	int err = 0;
1731 
1732 	PMD_INIT_FUNC_TRACE();
1733 
1734 	switch (vlan_type) {
1735 	case ETH_VLAN_TYPE_INNER:
1736 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1737 		break;
1738 	case ETH_VLAN_TYPE_OUTER:
1739 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1740 		break;
1741 	default:
1742 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1743 		err = -ENOTSUP;
1744 	}
1745 
1746 	return err;
1747 }
1748 
1749 static void
1750 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1751 {
1752 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1753 
1754 	PMD_INIT_FUNC_TRACE();
1755 
1756 	if (queue_id > dev->data->nb_rx_queues) {
1757 		PMD_DRV_LOG(ERR, "Invalid queue id");
1758 		return;
1759 	}
1760 
1761 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1762 }
1763 
1764 static int
1765 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1766 			  struct rte_ether_addr *mc_addr_set,
1767 			  uint32_t nb_mc_addr)
1768 {
1769 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1770 	u32 i;
1771 
1772 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1773 		return -EINVAL;
1774 
1775 	/* Update whole uc filters table */
1776 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1777 		u8 *mac_addr = NULL;
1778 		u32 l = 0, h = 0;
1779 
1780 		if (i < nb_mc_addr) {
1781 			mac_addr = mc_addr_set[i].addr_bytes;
1782 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1783 				(mac_addr[4] << 8) | mac_addr[5];
1784 			h = (mac_addr[0] << 8) | mac_addr[1];
1785 		}
1786 
1787 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1788 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1789 							HW_ATL_B0_MAC_MIN + i);
1790 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1791 							HW_ATL_B0_MAC_MIN + i);
1792 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1793 					   HW_ATL_B0_MAC_MIN + i);
1794 	}
1795 
1796 	return 0;
1797 }
1798 
1799 static int
1800 atl_reta_update(struct rte_eth_dev *dev,
1801 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1802 		   uint16_t reta_size)
1803 {
1804 	int i;
1805 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1806 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1807 
1808 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1809 		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1810 					dev->data->nb_rx_queues - 1);
1811 
1812 	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1813 	return 0;
1814 }
1815 
1816 static int
1817 atl_reta_query(struct rte_eth_dev *dev,
1818 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1819 		    uint16_t reta_size)
1820 {
1821 	int i;
1822 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1823 
1824 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1825 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1826 	reta_conf->mask = ~0U;
1827 	return 0;
1828 }
1829 
1830 static int
1831 atl_rss_hash_update(struct rte_eth_dev *dev,
1832 				 struct rte_eth_rss_conf *rss_conf)
1833 {
1834 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1835 	struct aq_hw_cfg_s *cfg =
1836 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1837 	static u8 def_rss_key[40] = {
1838 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1839 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1840 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1841 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1842 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1843 	};
1844 
1845 	cfg->is_rss = !!rss_conf->rss_hf;
1846 	if (rss_conf->rss_key) {
1847 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1848 		       rss_conf->rss_key_len);
1849 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1850 	} else {
1851 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1852 		       sizeof(def_rss_key));
1853 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1854 	}
1855 
1856 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1857 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1858 	return 0;
1859 }
1860 
1861 static int
1862 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1863 				 struct rte_eth_rss_conf *rss_conf)
1864 {
1865 	struct aq_hw_cfg_s *cfg =
1866 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1867 
1868 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1869 	if (rss_conf->rss_key) {
1870 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1871 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1872 		       rss_conf->rss_key_len);
1873 	}
1874 
1875 	return 0;
1876 }
1877 
1878 static bool
1879 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1880 {
1881 	if (strcmp(dev->device->driver->name, drv->driver.name))
1882 		return false;
1883 
1884 	return true;
1885 }
1886 
1887 bool
1888 is_atlantic_supported(struct rte_eth_dev *dev)
1889 {
1890 	return is_device_supported(dev, &rte_atl_pmd);
1891 }
1892 
1893 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1894 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1895 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1896 
/* Constructor: register the PMD's init and driver log types and default
 * both to NOTICE level.
 */
RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}
1906