xref: /dpdk/drivers/net/atlantic/atl_ethdev.c (revision ec0dec44ecb9f90d92c66a6ee984cbb6c4c0e05f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 
8 #include "atl_ethdev.h"
9 #include "atl_common.h"
10 #include "atl_hw_regs.h"
11 #include "atl_logs.h"
12 #include "hw_atl/hw_atl_llh.h"
13 #include "hw_atl/hw_atl_b0.h"
14 #include "hw_atl/hw_atl_b0_internal.h"
15 
16 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
17 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
18 
19 static int  atl_dev_configure(struct rte_eth_dev *dev);
20 static int  atl_dev_start(struct rte_eth_dev *dev);
21 static void atl_dev_stop(struct rte_eth_dev *dev);
22 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
23 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
24 static void atl_dev_close(struct rte_eth_dev *dev);
25 static int  atl_dev_reset(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
27 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
29 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
30 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
31 
32 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
33 				    struct rte_eth_xstat_name *xstats_names,
34 				    unsigned int size);
35 
36 static int atl_dev_stats_get(struct rte_eth_dev *dev,
37 				struct rte_eth_stats *stats);
38 
39 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
40 			      struct rte_eth_xstat *stats, unsigned int n);
41 
42 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
43 
44 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
45 			      size_t fw_size);
46 
47 static void atl_dev_info_get(struct rte_eth_dev *dev,
48 			       struct rte_eth_dev_info *dev_info);
49 
50 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
51 
52 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
53 
54 /* VLAN stuff */
55 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
56 		uint16_t vlan_id, int on);
57 
58 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
59 
60 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
61 				     uint16_t queue_id, int on);
62 
63 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
64 			     enum rte_vlan_type vlan_type, uint16_t tpid);
65 
66 /* EEPROM */
67 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
68 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
69 			      struct rte_dev_eeprom_info *eeprom);
70 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
71 			      struct rte_dev_eeprom_info *eeprom);
72 
73 /* Regs */
74 static int atl_dev_get_regs(struct rte_eth_dev *dev,
75 			    struct rte_dev_reg_info *regs);
76 
77 /* Flow control */
78 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
79 			       struct rte_eth_fc_conf *fc_conf);
80 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
81 			       struct rte_eth_fc_conf *fc_conf);
82 
83 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
84 
85 /* Interrupts */
86 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
87 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
88 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
89 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
90 				    struct rte_intr_handle *handle);
91 static void atl_dev_interrupt_handler(void *param);
92 
93 
94 static int atl_add_mac_addr(struct rte_eth_dev *dev,
95 			    struct ether_addr *mac_addr,
96 			    uint32_t index, uint32_t pool);
97 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
98 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
99 					   struct ether_addr *mac_addr);
100 
101 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
102 				    struct ether_addr *mc_addr_set,
103 				    uint32_t nb_mc_addr);
104 
105 /* RSS */
106 static int atl_reta_update(struct rte_eth_dev *dev,
107 			     struct rte_eth_rss_reta_entry64 *reta_conf,
108 			     uint16_t reta_size);
109 static int atl_reta_query(struct rte_eth_dev *dev,
110 			    struct rte_eth_rss_reta_entry64 *reta_conf,
111 			    uint16_t reta_size);
112 static int atl_rss_hash_update(struct rte_eth_dev *dev,
113 				 struct rte_eth_rss_conf *rss_conf);
114 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
115 				   struct rte_eth_rss_conf *rss_conf);
116 
117 
118 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
119 	struct rte_pci_device *pci_dev);
120 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
121 
122 static void atl_dev_info_get(struct rte_eth_dev *dev,
123 				struct rte_eth_dev_info *dev_info);
124 
125 int atl_logtype_init;
126 int atl_logtype_driver;
127 
128 /*
129  * The set of PCI devices this driver supports
130  */
static const struct rte_pci_id pci_id_atl_map[] = {
	/* Early/engineering device IDs */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	/* AQC10x production parts */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	/* "S" variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	/* "E" variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};
156 
/* PCI driver descriptor: requires BAR mapping, supports link-state-change
 * interrupts and IOVA-as-VA mode.
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
164 
/* Rx offload capabilities advertised via dev_infos_get(). */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

/* Tx offload capabilities advertised via dev_infos_get(). */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)
180 
/* Rx ring descriptor count limits reported to applications. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

/* Tx ring descriptor count limits, including per-packet segment caps. */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
194 
/* Build one xstats table entry: stat name string plus its byte offset
 * inside struct aq_stats_s, so values can be fetched generically.
 */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name) \
}

/* One row of the extended-statistics lookup table. */
struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;	/* byte offset into struct aq_stats_s */
};
204 
/* Extended statistics exposed via xstats_get()/xstats_get_names().
 * Field names mirror struct aq_stats_s members (hardware counters).
 */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
};
221 
/* ethdev callback table; installed in eth_atl_dev_init().
 * Note xstats_reset intentionally shares atl_dev_stats_reset with
 * stats_reset (both clear the same counters).
 */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	/* RSS */
	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
301 
/* Reset the NIC through the B0 hardware layer; returns its status code. */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
307 
/* Unmask all interrupt causes (writes all-ones to the ITR mask-set
 * register's low word).
 */
static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}
315 
/* Mask all interrupt causes (writes all-ones to the ITR mask-clear
 * register's low word).
 */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
322 
323 static int
324 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
325 {
326 	struct atl_adapter *adapter =
327 		(struct atl_adapter *)eth_dev->data->dev_private;
328 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
329 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
330 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
331 	int err = 0;
332 
333 	PMD_INIT_FUNC_TRACE();
334 
335 	eth_dev->dev_ops = &atl_eth_dev_ops;
336 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
337 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
338 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
339 
340 	/* For secondary processes, the primary process has done all the work */
341 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
342 		return 0;
343 
344 	/* Vendor and Device ID need to be set before init of shared code */
345 	hw->device_id = pci_dev->id.device_id;
346 	hw->vendor_id = pci_dev->id.vendor_id;
347 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
348 
349 	/* Hardware configuration - hardcode */
350 	adapter->hw_cfg.is_lro = false;
351 	adapter->hw_cfg.wol = false;
352 	adapter->hw_cfg.is_rss = false;
353 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
354 
355 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
356 			  AQ_NIC_RATE_5G |
357 			  AQ_NIC_RATE_2G5 |
358 			  AQ_NIC_RATE_1G |
359 			  AQ_NIC_RATE_100M;
360 
361 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
362 	adapter->hw_cfg.aq_rss.indirection_table_size =
363 		HW_ATL_B0_RSS_REDIRECTION_MAX;
364 
365 	hw->aq_nic_cfg = &adapter->hw_cfg;
366 
367 	/* disable interrupt */
368 	atl_disable_intr(hw);
369 
370 	/* Allocate memory for storing MAC addresses */
371 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
372 	if (eth_dev->data->mac_addrs == NULL) {
373 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
374 		return -ENOMEM;
375 	}
376 
377 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
378 	if (err)
379 		return err;
380 
381 	/* Copy the permanent MAC address */
382 	if (hw->aq_fw_ops->get_mac_permanent(hw,
383 			eth_dev->data->mac_addrs->addr_bytes) != 0)
384 		return -EINVAL;
385 
386 	/* Reset the hw statistics */
387 	atl_dev_stats_reset(eth_dev);
388 
389 	rte_intr_callback_register(intr_handle,
390 				   atl_dev_interrupt_handler, eth_dev);
391 
392 	/* enable uio/vfio intr/eventfd mapping */
393 	rte_intr_enable(intr_handle);
394 
395 	/* enable support intr */
396 	atl_enable_intr(eth_dev);
397 
398 	return err;
399 }
400 
/*
 * Per-port teardown callback (primary process only).
 *
 * Stops the adapter if still running, detaches the burst/ops pointers,
 * disables and unregisters the interrupt handler, then frees the MAC
 * address storage. Returns 0 on success, -EPERM from a secondary process.
 */
static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* Close (stop + free queues) only if the port wasn't stopped yet */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
432 
/* PCI probe: allocate an ethdev with an atl_adapter private area and
 * run eth_atl_dev_init() on it.
 */
static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}
440 
/* PCI remove: tear the port down via eth_atl_dev_uninit(). */
static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}
446 
/* dev_configure callback: nothing to program yet — just flag that the
 * link status must be refreshed after start. Always returns 0.
 */
static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}
460 
461 /*
462  * Configure device link speed and setup link.
463  * It returns 0 on success.
464  */
465 static int
466 atl_dev_start(struct rte_eth_dev *dev)
467 {
468 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
469 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
470 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
471 	uint32_t intr_vector = 0;
472 	int status;
473 	int err;
474 
475 	PMD_INIT_FUNC_TRACE();
476 
477 	/* set adapter started */
478 	hw->adapter_stopped = 0;
479 
480 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
481 		PMD_INIT_LOG(ERR,
482 		"Invalid link_speeds for port %u, fix speed not supported",
483 				dev->data->port_id);
484 		return -EINVAL;
485 	}
486 
487 	/* disable uio/vfio intr/eventfd mapping */
488 	rte_intr_disable(intr_handle);
489 
490 	/* reinitialize adapter
491 	 * this calls reset and start
492 	 */
493 	status = atl_reset_hw(hw);
494 	if (status != 0)
495 		return -EIO;
496 
497 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
498 
499 	hw_atl_b0_hw_start(hw);
500 	/* check and configure queue intr-vector mapping */
501 	if ((rte_intr_cap_multiple(intr_handle) ||
502 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
503 	    dev->data->dev_conf.intr_conf.rxq != 0) {
504 		intr_vector = dev->data->nb_rx_queues;
505 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
506 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
507 					ATL_MAX_INTR_QUEUE_NUM);
508 			return -ENOTSUP;
509 		}
510 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
511 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
512 			return -1;
513 		}
514 	}
515 
516 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
517 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
518 				    dev->data->nb_rx_queues * sizeof(int), 0);
519 		if (intr_handle->intr_vec == NULL) {
520 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
521 				     " intr_vec", dev->data->nb_rx_queues);
522 			return -ENOMEM;
523 		}
524 	}
525 
526 	/* initialize transmission unit */
527 	atl_tx_init(dev);
528 
529 	/* This can fail when allocating mbufs for descriptor rings */
530 	err = atl_rx_init(dev);
531 	if (err) {
532 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
533 		goto error;
534 	}
535 
536 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
537 		hw->fw_ver_actual >> 24,
538 		(hw->fw_ver_actual >> 16) & 0xFF,
539 		hw->fw_ver_actual & 0xFFFF);
540 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
541 
542 	err = atl_start_queues(dev);
543 	if (err < 0) {
544 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
545 		goto error;
546 	}
547 
548 	err = atl_dev_set_link_up(dev);
549 
550 	err = hw->aq_fw_ops->update_link_status(hw);
551 
552 	if (err)
553 		goto error;
554 
555 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
556 
557 	if (err)
558 		goto error;
559 
560 	if (rte_intr_allow_others(intr_handle)) {
561 		/* check if lsc interrupt is enabled */
562 		if (dev->data->dev_conf.intr_conf.lsc != 0)
563 			atl_dev_lsc_interrupt_setup(dev, true);
564 		else
565 			atl_dev_lsc_interrupt_setup(dev, false);
566 	} else {
567 		rte_intr_callback_unregister(intr_handle,
568 					     atl_dev_interrupt_handler, dev);
569 		if (dev->data->dev_conf.intr_conf.lsc != 0)
570 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
571 				     " no intr multiplex");
572 	}
573 
574 	/* check if rxq interrupt is enabled */
575 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
576 	    rte_intr_dp_is_en(intr_handle))
577 		atl_dev_rxq_interrupt_setup(dev);
578 
579 	/* enable uio/vfio intr/eventfd mapping */
580 	rte_intr_enable(intr_handle);
581 
582 	/* resume enabled intr since hw reset */
583 	atl_enable_intr(dev);
584 
585 	return 0;
586 
587 error:
588 	atl_stop_queues(dev);
589 	return -EIO;
590 }
591 
592 /*
593  * Stop device: disable rx and tx functions to allow for reconfiguring.
594  */
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 *
 * Masks interrupts, resets the NIC, stops all queues, clears cached
 * Rx state and the recorded link status, and releases datapath
 * interrupt resources. The default interrupt handler is re-registered
 * when interrupt multiplexing is unavailable (it was unregistered in
 * atl_dev_start() for that case).
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
636 
637 /*
638  * Set device link up: enable tx.
639  */
640 static int
641 atl_dev_set_link_up(struct rte_eth_dev *dev)
642 {
643 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
644 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
645 	uint32_t speed_mask = 0;
646 
647 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
648 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
649 	} else {
650 		if (link_speeds & ETH_LINK_SPEED_10G)
651 			speed_mask |= AQ_NIC_RATE_10G;
652 		if (link_speeds & ETH_LINK_SPEED_5G)
653 			speed_mask |= AQ_NIC_RATE_5G;
654 		if (link_speeds & ETH_LINK_SPEED_1G)
655 			speed_mask |= AQ_NIC_RATE_1G;
656 		if (link_speeds & ETH_LINK_SPEED_2_5G)
657 			speed_mask |=  AQ_NIC_RATE_2G5;
658 		if (link_speeds & ETH_LINK_SPEED_100M)
659 			speed_mask |= AQ_NIC_RATE_100M;
660 	}
661 
662 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
663 }
664 
665 /*
666  * Set device link down: disable tx.
667  */
/*
 * Set device link down: request an empty speed mask from firmware,
 * which takes the link down.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}
675 
676 /*
677  * Reset and stop device.
678  */
/*
 * Reset and stop device, then release all queue resources.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}
688 
/*
 * dev_reset callback: re-create the port in place by running the full
 * uninit/init cycle. Returns the first error encountered, 0 on success.
 */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int err = eth_atl_dev_uninit(dev);

	if (err != 0)
		return err;

	return eth_atl_dev_init(dev);
}
702 
/* Record that MACsec is enabled (with optional encryption and replay
 * protection) in the cached HW config. Always returns 0; the settings
 * are applied to hardware elsewhere.
 */
int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

	return 0;
}
715 
/* Clear the MACsec-enabled flag in the cached HW config. Always 0. */
int atl_macsec_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

	return 0;
}
725 
/* Store the Tx secure-channel MAC address in the cached config.
 * The +2 byte offset suggests the mac field is 8 bytes with the 6-byte
 * address right-aligned — confirm against the aq_hw_cfg_s layout.
 */
int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, ETHER_ADDR_LEN);

	return 0;
}
736 
/* Store the Rx secure-channel MAC address and port identifier in the
 * cached config. Same right-aligned MAC layout as the Tx SC
 * (see atl_macsec_config_txsc).
 */
int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}
749 
/* Cache the selected Tx secure association: index, association number,
 * packet number and key. Copies 16 bytes of key — assumes a 128-bit
 * (AES-128) key; confirm against the txsa.key storage size.
 */
int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}
764 
/* Cache the selected Rx secure association (mirror of the Tx variant).
 * Copies 16 bytes of key — assumes a 128-bit key; confirm against the
 * rxsa.key storage size.
 */
int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}
779 
780 static int
781 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
782 {
783 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
784 	struct aq_hw_s *hw = &adapter->hw;
785 	struct atl_sw_stats *swstats = &adapter->sw_stats;
786 	unsigned int i;
787 
788 	hw->aq_fw_ops->update_stats(hw);
789 
790 	/* Fill out the rte_eth_stats statistics structure */
791 	stats->ipackets = hw->curr_stats.dma_pkt_rc;
792 	stats->ibytes = hw->curr_stats.dma_oct_rc;
793 	stats->imissed = hw->curr_stats.dpc;
794 	stats->ierrors = hw->curr_stats.erpt;
795 
796 	stats->opackets = hw->curr_stats.dma_pkt_tc;
797 	stats->obytes = hw->curr_stats.dma_oct_tc;
798 	stats->oerrors = 0;
799 
800 	stats->rx_nombuf = swstats->rx_nombuf;
801 
802 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
803 		stats->q_ipackets[i] = swstats->q_ipackets[i];
804 		stats->q_opackets[i] = swstats->q_opackets[i];
805 		stats->q_ibytes[i] = swstats->q_ibytes[i];
806 		stats->q_obytes[i] = swstats->q_obytes[i];
807 		stats->q_errors[i] = swstats->q_errors[i];
808 	}
809 	return 0;
810 }
811 
/* stats_reset / xstats_reset callback: sync counters from firmware,
 * then zero both the cached hardware totals and the software
 * per-queue totals.
 */
static void
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
}
825 
826 static int
827 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
828 			 struct rte_eth_xstat_name *xstats_names,
829 			 unsigned int size)
830 {
831 	unsigned int i;
832 
833 	if (!xstats_names)
834 		return RTE_DIM(atl_xstats_tbl);
835 
836 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
837 		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
838 			RTE_ETH_XSTATS_NAME_SIZE);
839 
840 	return i;
841 }
842 
843 static int
844 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
845 		   unsigned int n)
846 {
847 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
848 	struct aq_hw_s *hw = &adapter->hw;
849 	unsigned int i;
850 
851 	if (!stats)
852 		return 0;
853 
854 	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
855 		stats[i].id = i;
856 		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
857 					atl_xstats_tbl[i].offset);
858 	}
859 
860 	return i;
861 }
862 
863 static int
864 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
865 {
866 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
867 	uint32_t fw_ver = 0;
868 	unsigned int ret = 0;
869 
870 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
871 	if (ret)
872 		return -EIO;
873 
874 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
875 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
876 
877 	ret += 1; /* add string null-terminator */
878 
879 	if (fw_size < ret)
880 		return ret;
881 
882 	return 0;
883 }
884 
885 static void
886 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
887 {
888 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
889 
890 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
891 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
892 
893 	dev_info->min_rx_bufsize = 1024;
894 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
895 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
896 	dev_info->max_vfs = pci_dev->max_vfs;
897 
898 	dev_info->max_hash_mac_addrs = 0;
899 	dev_info->max_vmdq_pools = 0;
900 	dev_info->vmdq_queue_num = 0;
901 
902 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
903 
904 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
905 
906 
907 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
908 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
909 	};
910 
911 	dev_info->default_txconf = (struct rte_eth_txconf) {
912 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
913 	};
914 
915 	dev_info->rx_desc_lim = rx_desc_lim;
916 	dev_info->tx_desc_lim = tx_desc_lim;
917 
918 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
919 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
920 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
921 
922 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
923 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
924 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
925 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
926 }
927 
/* dev_supported_ptypes_get callback: advertise the packet types the
 * scalar receive path reports. Returns NULL if an unexpected rx burst
 * function is installed.
 */
static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	/* Only valid for the standard (non-vectorized) receive path */
	if (dev->rx_pkt_burst == atl_recv_pkts)
		return ptypes;

	return NULL;
}
949 
/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link, old;
	int err = 0;

	/* Start from a "link down" template; autoneg reflects driver state */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	/* On a firmware read failure, report "changed" without updating */
	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	/* Link is up: no further link (re)configuration needed */
	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
996 
/* promiscuous_enable callback: turn on L2 promiscuous mode in hardware. */
static void
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
}
1004 
/* promiscuous_disable callback: turn off L2 promiscuous mode in hardware. */
static void
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
}
1012 
/* allmulticast_enable callback: accept all multicast packets. */
static void
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
}
1020 
1021 static void
1022 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1023 {
1024 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1025 
1026 	if (dev->data->promiscuous == 1)
1027 		return; /* must remain in all_multicast mode */
1028 
1029 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1030 }
1031 
/**
 * Link-state-change interrupt setup hook.
 *
 * NOTE(review): despite the eth-dev op name, this implementation performs
 * no hardware interrupt configuration — it only logs the current link
 * status. The 'on' flag is ignored.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable (unused here).
 *
 * @return
 *  Always zero.
 */

static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}
1052 
/* Rx-queue interrupt setup: intentionally a no-op for this device;
 * reports success so the generic start path proceeds.
 */
static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
1058 
1059 
1060 static int
1061 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1062 {
1063 	struct atl_interrupt *intr =
1064 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1065 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1066 	u64 cause = 0;
1067 
1068 	hw_atl_b0_hw_irq_read(hw, &cause);
1069 
1070 	atl_disable_intr(hw);
1071 	intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
1072 			ATL_FLAG_NEED_LINK_UPDATE : 0;
1073 
1074 	return 0;
1075 }
1076 
1077 /**
1078  * It gets and then prints the link status.
1079  *
1080  * @param dev
1081  *  Pointer to struct rte_eth_dev.
1082  *
1083  * @return
1084  *  - On success, zero.
1085  *  - On failure, a negative value.
1086  */
1087 static void
1088 atl_dev_link_status_print(struct rte_eth_dev *dev)
1089 {
1090 	struct rte_eth_link link;
1091 
1092 	memset(&link, 0, sizeof(link));
1093 	rte_eth_linkstatus_get(dev, &link);
1094 	if (link.link_status) {
1095 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1096 					(int)(dev->data->port_id),
1097 					(unsigned int)link.link_speed,
1098 			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1099 					"full-duplex" : "half-duplex");
1100 	} else {
1101 		PMD_DRV_LOG(INFO, " Port %d: Link Down",
1102 				(int)(dev->data->port_id));
1103 	}
1104 
1105 
1106 #ifdef DEBUG
1107 {
1108 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1109 
1110 	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1111 				pci_dev->addr.domain,
1112 				pci_dev->addr.bus,
1113 				pci_dev->addr.devid,
1114 				pci_dev->addr.function);
1115 }
1116 #endif
1117 
1118 	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1119 }
1120 
1121 /*
1122  * It executes link_update after knowing an interrupt occurred.
1123  *
1124  * @param dev
1125  *  Pointer to struct rte_eth_dev.
1126  *
1127  * @return
1128  *  - On success, zero.
1129  *  - On failure, a negative value.
1130  */
1131 static int
1132 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1133 			   struct rte_intr_handle *intr_handle)
1134 {
1135 	struct atl_interrupt *intr =
1136 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1137 
1138 	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1139 		atl_dev_link_update(dev, 0);
1140 		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1141 		atl_dev_link_status_print(dev);
1142 		_rte_eth_dev_callback_process(dev,
1143 			RTE_ETH_EVENT_INTR_LSC, NULL);
1144 	}
1145 
1146 	atl_enable_intr(dev);
1147 	rte_intr_enable(intr_handle);
1148 
1149 	return 0;
1150 }
1151 
1152 /**
1153  * Interrupt handler triggered by NIC  for handling
1154  * specific interrupt.
1155  *
1156  * @param handle
1157  *  Pointer to interrupt handle.
1158  * @param param
1159  *  The address of parameter (struct rte_eth_dev *) regsitered before.
1160  *
1161  * @return
1162  *  void
1163  */
1164 static void
1165 atl_dev_interrupt_handler(void *param)
1166 {
1167 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1168 
1169 	atl_dev_interrupt_get_status(dev);
1170 	atl_dev_interrupt_action(dev, dev->intr_handle);
1171 }
1172 
1173 #define SFP_EEPROM_SIZE 0xff
1174 
/* Report the SFP module EEPROM size exposed via rte_eth_dev_get_eeprom. */
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}
1180 
1181 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1182 		       struct rte_dev_eeprom_info *eeprom)
1183 {
1184 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1185 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1186 
1187 	if (hw->aq_fw_ops->get_eeprom == NULL)
1188 		return -ENOTSUP;
1189 
1190 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1191 	    eeprom->data == NULL)
1192 		return -EINVAL;
1193 
1194 	if (eeprom->magic)
1195 		dev_addr = eeprom->magic;
1196 
1197 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1198 					 eeprom->length, eeprom->offset);
1199 }
1200 
1201 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1202 		       struct rte_dev_eeprom_info *eeprom)
1203 {
1204 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1205 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1206 
1207 	if (hw->aq_fw_ops->set_eeprom == NULL)
1208 		return -ENOTSUP;
1209 
1210 	if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1211 		return -EINVAL;
1212 
1213 	if (eeprom->magic)
1214 		dev_addr = eeprom->magic;
1215 
1216 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr,
1217 					 eeprom->data, eeprom->length);
1218 }
1219 
1220 static int
1221 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1222 {
1223 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1224 	u32 mif_id;
1225 	int err;
1226 
1227 	if (regs->data == NULL) {
1228 		regs->length = hw_atl_utils_hw_get_reg_length();
1229 		regs->width = sizeof(u32);
1230 		return 0;
1231 	}
1232 
1233 	/* Only full register dump is supported */
1234 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1235 		return -ENOTSUP;
1236 
1237 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1238 
1239 	/* Device version */
1240 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1241 	regs->version = mif_id & 0xFFU;
1242 
1243 	return err;
1244 }
1245 
1246 static int
1247 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1248 {
1249 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1250 
1251 	if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1252 		fc_conf->mode = RTE_FC_NONE;
1253 	else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1254 		fc_conf->mode = RTE_FC_FULL;
1255 	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1256 		fc_conf->mode = RTE_FC_RX_PAUSE;
1257 	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1258 		fc_conf->mode = RTE_FC_TX_PAUSE;
1259 
1260 	return 0;
1261 }
1262 
1263 static int
1264 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1265 {
1266 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1267 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1268 
1269 
1270 	if (hw->aq_fw_ops->set_flow_control == NULL)
1271 		return -ENOTSUP;
1272 
1273 	if (fc_conf->mode == RTE_FC_NONE)
1274 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1275 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1276 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1277 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1278 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1279 	else if (fc_conf->mode == RTE_FC_FULL)
1280 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1281 
1282 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1283 		return hw->aq_fw_ops->set_flow_control(hw);
1284 
1285 	return 0;
1286 }
1287 
1288 static int
1289 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1290 		    u8 *mac_addr, bool enable)
1291 {
1292 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1293 	unsigned int h = 0U;
1294 	unsigned int l = 0U;
1295 	int err;
1296 
1297 	if (mac_addr) {
1298 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1299 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1300 			(mac_addr[4] << 8) | mac_addr[5];
1301 	}
1302 
1303 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1304 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1305 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1306 
1307 	if (enable)
1308 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1309 
1310 	err = aq_hw_err_from_flags(hw);
1311 
1312 	return err;
1313 }
1314 
1315 static int
1316 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1317 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1318 {
1319 	if (is_zero_ether_addr(mac_addr)) {
1320 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1321 		return -EINVAL;
1322 	}
1323 
1324 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1325 }
1326 
/* Clear and disable unicast filter slot 'index'. */
static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}
1332 
/* Replace the primary (slot 0) MAC address.
 * Fix: the result of atl_add_mac_addr() was discarded and 0 returned
 * unconditionally, hiding programming failures from the caller.
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1340 
1341 static int
1342 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1343 {
1344 	struct rte_eth_dev_info dev_info;
1345 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1346 
1347 	atl_dev_info_get(dev, &dev_info);
1348 
1349 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1350 		return -EINVAL;
1351 
1352 	/* update max frame size */
1353 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1354 
1355 	return 0;
1356 }
1357 
/* Add (on != 0) or remove (on == 0) a VLAN id from the hardware VLAN
 * filter table, keeping the software shadow table cfg->vlan_filter in
 * sync. VLAN promiscuous mode is enabled whenever the table ends up
 * empty, so traffic still flows with no filters installed.
 *
 * Returns 0 on success, -ENOMEM when all filter slots are occupied.
 * NOTE(review): slot value 0 doubles as "free slot", so VLAN id 0
 * cannot be stored in the table — presumably intentional; verify.
 */
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Look for an existing entry; handle removal inline. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exist, or already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to found free VLAN filter to add new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID*/
		err = -ENOMEM;
		goto exit;
	}

	/* Record the id, then program and enable the hardware slot. */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter empty  */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
1418 
1419 static int
1420 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1421 {
1422 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1423 	struct aq_hw_cfg_s *cfg =
1424 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1425 	int i;
1426 
1427 	PMD_INIT_FUNC_TRACE();
1428 
1429 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1430 		if (cfg->vlan_filter[i])
1431 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1432 	}
1433 	return 0;
1434 }
1435 
1436 static int
1437 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1438 {
1439 	struct aq_hw_cfg_s *cfg =
1440 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1441 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1442 	int ret = 0;
1443 	int i;
1444 
1445 	PMD_INIT_FUNC_TRACE();
1446 
1447 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1448 
1449 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1450 
1451 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1452 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1453 
1454 	if (mask & ETH_VLAN_EXTEND_MASK)
1455 		ret = -ENOTSUP;
1456 
1457 	return ret;
1458 }
1459 
1460 static int
1461 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1462 		  uint16_t tpid)
1463 {
1464 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1465 	int err = 0;
1466 
1467 	PMD_INIT_FUNC_TRACE();
1468 
1469 	switch (vlan_type) {
1470 	case ETH_VLAN_TYPE_INNER:
1471 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1472 		break;
1473 	case ETH_VLAN_TYPE_OUTER:
1474 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1475 		break;
1476 	default:
1477 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1478 		err = -ENOTSUP;
1479 	}
1480 
1481 	return err;
1482 }
1483 
1484 static void
1485 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1486 {
1487 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1488 
1489 	PMD_INIT_FUNC_TRACE();
1490 
1491 	if (queue_id > dev->data->nb_rx_queues) {
1492 		PMD_DRV_LOG(ERR, "Invalid queue id");
1493 		return;
1494 	}
1495 
1496 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1497 }
1498 
1499 static int
1500 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1501 			  struct ether_addr *mc_addr_set,
1502 			  uint32_t nb_mc_addr)
1503 {
1504 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1505 	u32 i;
1506 
1507 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1508 		return -EINVAL;
1509 
1510 	/* Update whole uc filters table */
1511 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1512 		u8 *mac_addr = NULL;
1513 		u32 l = 0, h = 0;
1514 
1515 		if (i < nb_mc_addr) {
1516 			mac_addr = mc_addr_set[i].addr_bytes;
1517 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1518 				(mac_addr[4] << 8) | mac_addr[5];
1519 			h = (mac_addr[0] << 8) | mac_addr[1];
1520 		}
1521 
1522 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1523 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1524 							HW_ATL_B0_MAC_MIN + i);
1525 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1526 							HW_ATL_B0_MAC_MIN + i);
1527 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1528 					   HW_ATL_B0_MAC_MIN + i);
1529 	}
1530 
1531 	return 0;
1532 }
1533 
1534 static int
1535 atl_reta_update(struct rte_eth_dev *dev,
1536 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1537 		   uint16_t reta_size)
1538 {
1539 	int i;
1540 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1541 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1542 
1543 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1544 		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1545 					dev->data->nb_rx_queues - 1);
1546 
1547 	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1548 	return 0;
1549 }
1550 
1551 static int
1552 atl_reta_query(struct rte_eth_dev *dev,
1553 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1554 		    uint16_t reta_size)
1555 {
1556 	int i;
1557 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1558 
1559 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1560 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1561 	reta_conf->mask = ~0U;
1562 	return 0;
1563 }
1564 
1565 static int
1566 atl_rss_hash_update(struct rte_eth_dev *dev,
1567 				 struct rte_eth_rss_conf *rss_conf)
1568 {
1569 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1570 	struct aq_hw_cfg_s *cfg =
1571 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1572 	static u8 def_rss_key[40] = {
1573 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1574 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1575 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1576 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1577 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1578 	};
1579 
1580 	cfg->is_rss = !!rss_conf->rss_hf;
1581 	if (rss_conf->rss_key) {
1582 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1583 		       rss_conf->rss_key_len);
1584 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1585 	} else {
1586 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1587 		       sizeof(def_rss_key));
1588 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1589 	}
1590 
1591 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1592 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1593 	return 0;
1594 }
1595 
1596 static int
1597 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1598 				 struct rte_eth_rss_conf *rss_conf)
1599 {
1600 	struct aq_hw_cfg_s *cfg =
1601 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1602 
1603 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1604 	if (rss_conf->rss_key) {
1605 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1606 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1607 		       rss_conf->rss_key_len);
1608 	}
1609 
1610 	return 0;
1611 }
1612 
1613 static bool
1614 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1615 {
1616 	if (strcmp(dev->device->driver->name, drv->driver.name))
1617 		return false;
1618 
1619 	return true;
1620 }
1621 
/* True when the port is bound to this atlantic PMD. */
bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}
1627 
1628 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1629 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1630 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1631 
1632 RTE_INIT(atl_init_log)
1633 {
1634 	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1635 	if (atl_logtype_init >= 0)
1636 		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1637 	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1638 	if (atl_logtype_driver >= 0)
1639 		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
1640 }
1641 
1642