xref: /dpdk/drivers/net/atlantic/atl_ethdev.c (revision 5ecb687a5698d2d8ec1f3b3b5a7a16bceca3e29c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 
8 #include "atl_ethdev.h"
9 #include "atl_common.h"
10 #include "atl_hw_regs.h"
11 #include "atl_logs.h"
12 #include "hw_atl/hw_atl_llh.h"
13 #include "hw_atl/hw_atl_b0.h"
14 #include "hw_atl/hw_atl_b0_internal.h"
15 
/* Forward declarations for the eth_dev_ops callbacks implemented below. */
static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);

/* Device lifecycle */
static int  atl_dev_configure(struct rte_eth_dev *dev);
static int  atl_dev_start(struct rte_eth_dev *dev);
static void atl_dev_stop(struct rte_eth_dev *dev);
static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
static void atl_dev_close(struct rte_eth_dev *dev);
static int  atl_dev_reset(struct rte_eth_dev *dev);
static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);

/* Statistics */
static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);

static void atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);

static void atl_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN stuff */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
				     uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);

/* Regs */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);

/* MAC address management */
static int atl_add_mac_addr(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
					   struct ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);
117 
118 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
119 	struct rte_pci_device *pci_dev);
120 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
121 
122 static void atl_dev_info_get(struct rte_eth_dev *dev,
123 				struct rte_eth_dev_info *dev_info);
124 
125 int atl_logtype_init;
126 int atl_logtype_driver;
127 
/*
 * The set of PCI devices this driver supports
 * (all Aquantia vendor-ID parts; list is sentinel-terminated).
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};
156 
/* PCI driver descriptor: needs BAR mapping, supports link-state
 * change interrupts and IOVA-as-VA mode.
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
164 
/* RX offload capabilities reported via dev_infos_get(). */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

/* TX offload capabilities reported via dev_infos_get(). */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MULTI_SEGS)
178 
/* Descriptor-ring limits advertised to applications (RX). */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

/* Descriptor-ring limits advertised to applications (TX),
 * including per-packet segment limits.
 */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
192 
/* Build an xstats table entry: the stat's name plus its byte offset
 * inside struct aq_stats_s, so values can be read generically.
 */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name) \
}

/* One row of the extended-statistics lookup table. */
struct atl_xstats_tbl_s {
	const char *name;	/* stat name reported to the application */
	unsigned int offset;	/* byte offset within struct aq_stats_s */
};

/* Extended statistics exposed by xstats_get()/xstats_get_names(). */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
};
219 
/* eth_dev_ops table installed on the device in eth_atl_dev_init(). */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats (xstats_reset shares the stats_reset handler) */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
299 
/* Reset the adapter through the B0 hardware layer.
 * Returns 0 on success, negative on failure.
 */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
305 
/* Unmask all interrupt sources (low 32 bits of the ITR mask). */
static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}
313 
/* Mask all interrupt sources (low 32 bits of the ITR mask). */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
320 
321 static int
322 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
323 {
324 	struct atl_adapter *adapter =
325 		(struct atl_adapter *)eth_dev->data->dev_private;
326 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
327 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
328 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
329 	int err = 0;
330 
331 	PMD_INIT_FUNC_TRACE();
332 
333 	eth_dev->dev_ops = &atl_eth_dev_ops;
334 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
335 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
336 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
337 
338 	/* For secondary processes, the primary process has done all the work */
339 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
340 		return 0;
341 
342 	/* Vendor and Device ID need to be set before init of shared code */
343 	hw->device_id = pci_dev->id.device_id;
344 	hw->vendor_id = pci_dev->id.vendor_id;
345 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
346 
347 	/* Hardware configuration - hardcode */
348 	adapter->hw_cfg.is_lro = false;
349 	adapter->hw_cfg.wol = false;
350 	adapter->hw_cfg.is_rss = false;
351 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
352 
353 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
354 			  AQ_NIC_RATE_5G |
355 			  AQ_NIC_RATE_2G5 |
356 			  AQ_NIC_RATE_1G |
357 			  AQ_NIC_RATE_100M;
358 
359 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
360 	adapter->hw_cfg.aq_rss.indirection_table_size =
361 		HW_ATL_B0_RSS_REDIRECTION_MAX;
362 
363 	hw->aq_nic_cfg = &adapter->hw_cfg;
364 
365 	/* disable interrupt */
366 	atl_disable_intr(hw);
367 
368 	/* Allocate memory for storing MAC addresses */
369 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
370 	if (eth_dev->data->mac_addrs == NULL) {
371 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
372 		return -ENOMEM;
373 	}
374 
375 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
376 	if (err)
377 		return err;
378 
379 	/* Copy the permanent MAC address */
380 	if (hw->aq_fw_ops->get_mac_permanent(hw,
381 			eth_dev->data->mac_addrs->addr_bytes) != 0)
382 		return -EINVAL;
383 
384 	/* Reset the hw statistics */
385 	atl_dev_stats_reset(eth_dev);
386 
387 	rte_intr_callback_register(intr_handle,
388 				   atl_dev_interrupt_handler, eth_dev);
389 
390 	/* enable uio/vfio intr/eventfd mapping */
391 	rte_intr_enable(intr_handle);
392 
393 	/* enable support intr */
394 	atl_enable_intr(eth_dev);
395 
396 	return err;
397 }
398 
/*
 * Per-port teardown: close the device if still running, detach function
 * pointers, unregister the interrupt handler and free the MAC table.
 * Only the primary process may tear the port down.
 */
static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* Stop the adapter first so no traffic/IRQ races the teardown */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
430 
431 static int
432 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
433 	struct rte_pci_device *pci_dev)
434 {
435 	return rte_eth_dev_pci_generic_probe(pci_dev,
436 		sizeof(struct atl_adapter), eth_atl_dev_init);
437 }
438 
439 static int
440 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
441 {
442 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
443 }
444 
445 static int
446 atl_dev_configure(struct rte_eth_dev *dev)
447 {
448 	struct atl_interrupt *intr =
449 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
450 
451 	PMD_INIT_FUNC_TRACE();
452 
453 	/* set flag to update link status after init */
454 	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
455 
456 	return 0;
457 }
458 
459 /*
460  * Configure device link speed and setup link.
461  * It returns 0 on success.
462  */
463 static int
464 atl_dev_start(struct rte_eth_dev *dev)
465 {
466 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
467 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
468 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
469 	uint32_t intr_vector = 0;
470 	int status;
471 	int err;
472 
473 	PMD_INIT_FUNC_TRACE();
474 
475 	/* set adapter started */
476 	hw->adapter_stopped = 0;
477 
478 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
479 		PMD_INIT_LOG(ERR,
480 		"Invalid link_speeds for port %u, fix speed not supported",
481 				dev->data->port_id);
482 		return -EINVAL;
483 	}
484 
485 	/* disable uio/vfio intr/eventfd mapping */
486 	rte_intr_disable(intr_handle);
487 
488 	/* reinitialize adapter
489 	 * this calls reset and start
490 	 */
491 	status = atl_reset_hw(hw);
492 	if (status != 0)
493 		return -EIO;
494 
495 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
496 
497 	hw_atl_b0_hw_start(hw);
498 	/* check and configure queue intr-vector mapping */
499 	if ((rte_intr_cap_multiple(intr_handle) ||
500 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
501 	    dev->data->dev_conf.intr_conf.rxq != 0) {
502 		intr_vector = dev->data->nb_rx_queues;
503 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
504 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
505 					ATL_MAX_INTR_QUEUE_NUM);
506 			return -ENOTSUP;
507 		}
508 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
509 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
510 			return -1;
511 		}
512 	}
513 
514 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
515 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
516 				    dev->data->nb_rx_queues * sizeof(int), 0);
517 		if (intr_handle->intr_vec == NULL) {
518 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
519 				     " intr_vec", dev->data->nb_rx_queues);
520 			return -ENOMEM;
521 		}
522 	}
523 
524 	/* initialize transmission unit */
525 	atl_tx_init(dev);
526 
527 	/* This can fail when allocating mbufs for descriptor rings */
528 	err = atl_rx_init(dev);
529 	if (err) {
530 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
531 		goto error;
532 	}
533 
534 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
535 		hw->fw_ver_actual >> 24,
536 		(hw->fw_ver_actual >> 16) & 0xFF,
537 		hw->fw_ver_actual & 0xFFFF);
538 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
539 
540 	err = atl_start_queues(dev);
541 	if (err < 0) {
542 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
543 		goto error;
544 	}
545 
546 	err = atl_dev_set_link_up(dev);
547 
548 	err = hw->aq_fw_ops->update_link_status(hw);
549 
550 	if (err)
551 		goto error;
552 
553 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
554 
555 	if (err)
556 		goto error;
557 
558 	if (rte_intr_allow_others(intr_handle)) {
559 		/* check if lsc interrupt is enabled */
560 		if (dev->data->dev_conf.intr_conf.lsc != 0)
561 			atl_dev_lsc_interrupt_setup(dev, true);
562 		else
563 			atl_dev_lsc_interrupt_setup(dev, false);
564 	} else {
565 		rte_intr_callback_unregister(intr_handle,
566 					     atl_dev_interrupt_handler, dev);
567 		if (dev->data->dev_conf.intr_conf.lsc != 0)
568 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
569 				     " no intr multiplex");
570 	}
571 
572 	/* check if rxq interrupt is enabled */
573 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
574 	    rte_intr_dp_is_en(intr_handle))
575 		atl_dev_rxq_interrupt_setup(dev);
576 
577 	/* enable uio/vfio intr/eventfd mapping */
578 	rte_intr_enable(intr_handle);
579 
580 	/* resume enabled intr since hw reset */
581 	atl_enable_intr(dev);
582 
583 	return 0;
584 
585 error:
586 	atl_stop_queues(dev);
587 	return -EIO;
588 }
589 
590 /*
591  * Stop device: disable rx and tx functions to allow for reconfiguring.
592  */
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 * Order matters: interrupts are masked first, then the NIC is reset,
 * queues are stopped, and cached state is cleared.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
634 
635 /*
636  * Set device link up: enable tx.
637  */
638 static int
639 atl_dev_set_link_up(struct rte_eth_dev *dev)
640 {
641 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
642 	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
643 	uint32_t speed_mask = 0;
644 
645 	if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
646 		speed_mask = hw->aq_nic_cfg->link_speed_msk;
647 	} else {
648 		if (link_speeds & ETH_LINK_SPEED_10G)
649 			speed_mask |= AQ_NIC_RATE_10G;
650 		if (link_speeds & ETH_LINK_SPEED_5G)
651 			speed_mask |= AQ_NIC_RATE_5G;
652 		if (link_speeds & ETH_LINK_SPEED_1G)
653 			speed_mask |= AQ_NIC_RATE_1G;
654 		if (link_speeds & ETH_LINK_SPEED_2_5G)
655 			speed_mask |=  AQ_NIC_RATE_2G5;
656 		if (link_speeds & ETH_LINK_SPEED_100M)
657 			speed_mask |= AQ_NIC_RATE_100M;
658 	}
659 
660 	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
661 }
662 
663 /*
664  * Set device link down: disable tx.
665  */
/*
 * Set device link down: requesting a zero speed mask from firmware
 * takes the link down.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}
673 
674 /*
675  * Reset and stop device.
676  */
/*
 * Reset and stop device, then release the queue resources.
 */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}
686 
/* dev_reset callback: full uninit followed by re-init of the port.
 * Returns the uninit error if teardown fails, otherwise the init result.
 */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret = eth_atl_dev_uninit(dev);

	if (ret != 0)
		return ret;

	return eth_atl_dev_init(dev);
}
700 
701 
702 static int
703 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
704 {
705 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
706 	struct aq_hw_s *hw = &adapter->hw;
707 	struct atl_sw_stats *swstats = &adapter->sw_stats;
708 	unsigned int i;
709 
710 	hw->aq_fw_ops->update_stats(hw);
711 
712 	/* Fill out the rte_eth_stats statistics structure */
713 	stats->ipackets = hw->curr_stats.dma_pkt_rc;
714 	stats->ibytes = hw->curr_stats.dma_oct_rc;
715 	stats->imissed = hw->curr_stats.dpc;
716 	stats->ierrors = hw->curr_stats.erpt;
717 
718 	stats->opackets = hw->curr_stats.dma_pkt_tc;
719 	stats->obytes = hw->curr_stats.dma_oct_tc;
720 	stats->oerrors = 0;
721 
722 	stats->rx_nombuf = swstats->rx_nombuf;
723 
724 	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
725 		stats->q_ipackets[i] = swstats->q_ipackets[i];
726 		stats->q_opackets[i] = swstats->q_opackets[i];
727 		stats->q_ibytes[i] = swstats->q_ibytes[i];
728 		stats->q_obytes[i] = swstats->q_obytes[i];
729 		stats->q_errors[i] = swstats->q_errors[i];
730 	}
731 	return 0;
732 }
733 
/*
 * Shared stats_reset/xstats_reset handler: sync counters from firmware,
 * then zero both the hardware snapshot and the software totals.
 */
static void
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
}
747 
748 static int
749 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
750 			 struct rte_eth_xstat_name *xstats_names,
751 			 unsigned int size)
752 {
753 	unsigned int i;
754 
755 	if (!xstats_names)
756 		return RTE_DIM(atl_xstats_tbl);
757 
758 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
759 		strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
760 			RTE_ETH_XSTATS_NAME_SIZE);
761 
762 	return i;
763 }
764 
765 static int
766 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
767 		   unsigned int n)
768 {
769 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
770 	struct aq_hw_s *hw = &adapter->hw;
771 	unsigned int i;
772 
773 	if (!stats)
774 		return 0;
775 
776 	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
777 		stats[i].id = i;
778 		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
779 					atl_xstats_tbl[i].offset);
780 	}
781 
782 	return i;
783 }
784 
785 static int
786 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
787 {
788 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
789 	uint32_t fw_ver = 0;
790 	unsigned int ret = 0;
791 
792 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
793 	if (ret)
794 		return -EIO;
795 
796 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
797 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
798 
799 	ret += 1; /* add string null-terminator */
800 
801 	if (fw_size < ret)
802 		return ret;
803 
804 	return 0;
805 }
806 
807 static void
808 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
809 {
810 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
811 
812 	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
813 	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
814 
815 	dev_info->min_rx_bufsize = 1024;
816 	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
817 	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
818 	dev_info->max_vfs = pci_dev->max_vfs;
819 
820 	dev_info->max_hash_mac_addrs = 0;
821 	dev_info->max_vmdq_pools = 0;
822 	dev_info->vmdq_queue_num = 0;
823 
824 	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
825 
826 	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
827 
828 
829 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
830 		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
831 	};
832 
833 	dev_info->default_txconf = (struct rte_eth_txconf) {
834 		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
835 	};
836 
837 	dev_info->rx_desc_lim = rx_desc_lim;
838 	dev_info->tx_desc_lim = tx_desc_lim;
839 
840 	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
841 	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
842 	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
843 
844 	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
845 	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
846 	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
847 	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
848 }
849 
850 static const uint32_t *
851 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
852 {
853 	static const uint32_t ptypes[] = {
854 		RTE_PTYPE_L2_ETHER,
855 		RTE_PTYPE_L2_ETHER_ARP,
856 		RTE_PTYPE_L2_ETHER_VLAN,
857 		RTE_PTYPE_L3_IPV4,
858 		RTE_PTYPE_L3_IPV6,
859 		RTE_PTYPE_L4_TCP,
860 		RTE_PTYPE_L4_UDP,
861 		RTE_PTYPE_L4_SCTP,
862 		RTE_PTYPE_L4_ICMP,
863 		RTE_PTYPE_UNKNOWN
864 	};
865 
866 	if (dev->rx_pkt_burst == atl_recv_pkts)
867 		return ptypes;
868 
869 	return NULL;
870 }
871 
872 /* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link, old;
	int err = 0;

	/* Prepare a default "link down" record to publish if needed */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	/* NOTE(review): on a firmware read error this reports "changed"
	 * (returns 0) without updating the recorded status — confirm
	 * this is the intended behavior.
	 */
	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	/* Link is up: no further link configuration is pending */
	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	/* -1 when the up/down state did not change, 0 when it did */
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
918 
/* Enable L2 promiscuous mode in the RX packet filter. */
static void
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
}
926 
/* Disable L2 promiscuous mode in the RX packet filter. */
static void
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
}
934 
/* Accept all multicast packets in the RX packet filter. */
static void
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
}
942 
/* Stop accepting all multicast packets, unless promiscuous mode is
 * active (promiscuous implies all-multicast, so leave it on).
 */
static void
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
}
953 
954 /**
955  * It clears the interrupt causes and enables the interrupt.
 * It is called only once, during NIC initialization.
957  *
958  * @param dev
959  *  Pointer to struct rte_eth_dev.
960  * @param on
961  *  Enable or Disable.
962  *
963  * @return
964  *  - On success, zero.
965  *  - On failure, a negative value.
966  */
967 
/* LSC interrupt setup: nothing to program here ('on' is unused);
 * just log the current link status.  Always returns 0.
 */
static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}
974 
/* RX queue interrupt setup: no per-queue programming needed; stub. */
static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
980 
981 
/* Read and latch the interrupt cause, masking further interrupts until
 * atl_dev_interrupt_action() re-enables them.  A link cause bit sets
 * ATL_FLAG_NEED_LINK_UPDATE for the action handler.
 */
static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);
	intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
			ATL_FLAG_NEED_LINK_UPDATE : 0;

	return 0;
}
998 
999 /**
1000  * It gets and then prints the link status.
1001  *
1002  * @param dev
1003  *  Pointer to struct rte_eth_dev.
1004  *
1005  * @return
1006  *  - On success, zero.
1007  *  - On failure, a negative value.
1008  */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	/* Read the recorded (not live) link status and log it */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}


#ifdef DEBUG
{
	/* Debug builds also log the PCI address of the port */
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
#endif

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
}
1042 
1043 /*
1044  * It executes link_update after knowing an interrupt occurred.
1045  *
1046  * @param dev
1047  *  Pointer to struct rte_eth_dev.
1048  *
1049  * @return
1050  *  - On success, zero.
1051  *  - On failure, a negative value.
1052  */
1053 static int
1054 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1055 			   struct rte_intr_handle *intr_handle)
1056 {
1057 	struct atl_interrupt *intr =
1058 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1059 
1060 	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1061 		atl_dev_link_update(dev, 0);
1062 		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1063 		atl_dev_link_status_print(dev);
1064 		_rte_eth_dev_callback_process(dev,
1065 			RTE_ETH_EVENT_INTR_LSC, NULL);
1066 	}
1067 
1068 	atl_enable_intr(dev);
1069 	rte_intr_enable(intr_handle);
1070 
1071 	return 0;
1072 }
1073 
1074 /**
1075  * Interrupt handler triggered by NIC  for handling
1076  * specific interrupt.
1077  *
1078  * @param handle
1079  *  Pointer to interrupt handle.
1080  * @param param
1081  *  The address of parameter (struct rte_eth_dev *) regsitered before.
1082  *
1083  * @return
1084  *  void
1085  */
1086 static void
1087 atl_dev_interrupt_handler(void *param)
1088 {
1089 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1090 
1091 	atl_dev_interrupt_get_status(dev);
1092 	atl_dev_interrupt_action(dev, dev->intr_handle);
1093 }
1094 
/* Size in bytes of the SFP module EEPROM exposed by get/set_eeprom. */
#define SFP_EEPROM_SIZE 0xff

/* Report the EEPROM size so callers can size their buffers. */
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}
1102 
1103 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1104 		       struct rte_dev_eeprom_info *eeprom)
1105 {
1106 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1107 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1108 
1109 	if (hw->aq_fw_ops->get_eeprom == NULL)
1110 		return -ENOTSUP;
1111 
1112 	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1113 	    eeprom->data == NULL)
1114 		return -EINVAL;
1115 
1116 	if (eeprom->magic)
1117 		dev_addr = eeprom->magic;
1118 
1119 	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1120 					 eeprom->length, eeprom->offset);
1121 }
1122 
1123 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1124 		       struct rte_dev_eeprom_info *eeprom)
1125 {
1126 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1127 	uint32_t dev_addr = SMBUS_DEVICE_ID;
1128 
1129 	if (hw->aq_fw_ops->set_eeprom == NULL)
1130 		return -ENOTSUP;
1131 
1132 	if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1133 		return -EINVAL;
1134 
1135 	if (eeprom->magic)
1136 		dev_addr = eeprom->magic;
1137 
1138 	return hw->aq_fw_ops->set_eeprom(hw, dev_addr,
1139 					 eeprom->data, eeprom->length);
1140 }
1141 
1142 static int
1143 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1144 {
1145 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1146 	u32 mif_id;
1147 	int err;
1148 
1149 	if (regs->data == NULL) {
1150 		regs->length = hw_atl_utils_hw_get_reg_length();
1151 		regs->width = sizeof(u32);
1152 		return 0;
1153 	}
1154 
1155 	/* Only full register dump is supported */
1156 	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1157 		return -ENOTSUP;
1158 
1159 	err = hw_atl_utils_hw_get_regs(hw, regs->data);
1160 
1161 	/* Device version */
1162 	mif_id = hw_atl_reg_glb_mif_id_get(hw);
1163 	regs->version = mif_id & 0xFFU;
1164 
1165 	return err;
1166 }
1167 
1168 static int
1169 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1170 {
1171 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1172 
1173 	if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1174 		fc_conf->mode = RTE_FC_NONE;
1175 	else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1176 		fc_conf->mode = RTE_FC_FULL;
1177 	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1178 		fc_conf->mode = RTE_FC_RX_PAUSE;
1179 	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1180 		fc_conf->mode = RTE_FC_TX_PAUSE;
1181 
1182 	return 0;
1183 }
1184 
1185 static int
1186 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1187 {
1188 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1189 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1190 
1191 
1192 	if (hw->aq_fw_ops->set_flow_control == NULL)
1193 		return -ENOTSUP;
1194 
1195 	if (fc_conf->mode == RTE_FC_NONE)
1196 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1197 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1198 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1199 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1200 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1201 	else if (fc_conf->mode == RTE_FC_FULL)
1202 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1203 
1204 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1205 		return hw->aq_fw_ops->set_flow_control(hw);
1206 
1207 	return 0;
1208 }
1209 
1210 static int
1211 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1212 		    u8 *mac_addr, bool enable)
1213 {
1214 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1215 	unsigned int h = 0U;
1216 	unsigned int l = 0U;
1217 	int err;
1218 
1219 	if (mac_addr) {
1220 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1221 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1222 			(mac_addr[4] << 8) | mac_addr[5];
1223 	}
1224 
1225 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1226 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1227 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1228 
1229 	if (enable)
1230 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1231 
1232 	err = aq_hw_err_from_flags(hw);
1233 
1234 	return err;
1235 }
1236 
1237 static int
1238 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1239 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1240 {
1241 	if (is_zero_ether_addr(mac_addr)) {
1242 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1243 		return -EINVAL;
1244 	}
1245 
1246 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1247 }
1248 
1249 static void
1250 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1251 {
1252 	atl_update_mac_addr(dev, index, NULL, false);
1253 }
1254 
/*
 * Replace the default (slot 0) MAC address.
 *
 * Propagates the result of programming the new address instead of
 * unconditionally returning success, so callers can observe filter
 * programming failures (e.g. a zero address rejected with -EINVAL).
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1262 
1263 static int
1264 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1265 {
1266 	struct rte_eth_dev_info dev_info;
1267 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1268 
1269 	atl_dev_info_get(dev, &dev_info);
1270 
1271 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1272 		return -EINVAL;
1273 
1274 	/* update max frame size */
1275 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1276 
1277 	return 0;
1278 }
1279 
/*
 * Add (on != 0) or remove (on == 0) a VLAN id in the hardware VLAN
 * filter table, then refresh VLAN promiscuous mode.
 *
 * cfg->vlan_filter[] mirrors the hardware table: a zero entry means
 * the slot is free. Returns 0 on success, -ENOMEM when no free slot is
 * left for a new VLAN id.
 */
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Look up vlan_id; on removal, disable and clear its slot. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exist, or already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to found free VLAN filter to add new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID*/
		err = -ENOMEM;
		goto exit;
	}

	/* Program the new VLAN id into the free slot and enable it. */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter empty  */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
1340 
1341 static int
1342 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1343 {
1344 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1345 	struct aq_hw_cfg_s *cfg =
1346 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1347 	int i;
1348 
1349 	PMD_INIT_FUNC_TRACE();
1350 
1351 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1352 		if (cfg->vlan_filter[i])
1353 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1354 	}
1355 	return 0;
1356 }
1357 
1358 static int
1359 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1360 {
1361 	struct aq_hw_cfg_s *cfg =
1362 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1363 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1364 	int ret = 0;
1365 	int i;
1366 
1367 	PMD_INIT_FUNC_TRACE();
1368 
1369 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1370 
1371 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1372 
1373 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1374 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1375 
1376 	if (mask & ETH_VLAN_EXTEND_MASK)
1377 		ret = -ENOTSUP;
1378 
1379 	return ret;
1380 }
1381 
1382 static int
1383 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1384 		  uint16_t tpid)
1385 {
1386 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1387 	int err = 0;
1388 
1389 	PMD_INIT_FUNC_TRACE();
1390 
1391 	switch (vlan_type) {
1392 	case ETH_VLAN_TYPE_INNER:
1393 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1394 		break;
1395 	case ETH_VLAN_TYPE_OUTER:
1396 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1397 		break;
1398 	default:
1399 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1400 		err = -ENOTSUP;
1401 	}
1402 
1403 	return err;
1404 }
1405 
1406 static void
1407 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1408 {
1409 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1410 
1411 	PMD_INIT_FUNC_TRACE();
1412 
1413 	if (queue_id > dev->data->nb_rx_queues) {
1414 		PMD_DRV_LOG(ERR, "Invalid queue id");
1415 		return;
1416 	}
1417 
1418 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1419 }
1420 
1421 static int
1422 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1423 			  struct ether_addr *mc_addr_set,
1424 			  uint32_t nb_mc_addr)
1425 {
1426 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1427 	u32 i;
1428 
1429 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1430 		return -EINVAL;
1431 
1432 	/* Update whole uc filters table */
1433 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1434 		u8 *mac_addr = NULL;
1435 		u32 l = 0, h = 0;
1436 
1437 		if (i < nb_mc_addr) {
1438 			mac_addr = mc_addr_set[i].addr_bytes;
1439 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1440 				(mac_addr[4] << 8) | mac_addr[5];
1441 			h = (mac_addr[0] << 8) | mac_addr[1];
1442 		}
1443 
1444 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1445 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1446 							HW_ATL_B0_MAC_MIN + i);
1447 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1448 							HW_ATL_B0_MAC_MIN + i);
1449 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1450 					   HW_ATL_B0_MAC_MIN + i);
1451 	}
1452 
1453 	return 0;
1454 }
1455 
1456 static int
1457 atl_reta_update(struct rte_eth_dev *dev,
1458 		   struct rte_eth_rss_reta_entry64 *reta_conf,
1459 		   uint16_t reta_size)
1460 {
1461 	int i;
1462 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1463 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1464 
1465 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1466 		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1467 					dev->data->nb_rx_queues - 1);
1468 
1469 	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1470 	return 0;
1471 }
1472 
1473 static int
1474 atl_reta_query(struct rte_eth_dev *dev,
1475 		    struct rte_eth_rss_reta_entry64 *reta_conf,
1476 		    uint16_t reta_size)
1477 {
1478 	int i;
1479 	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1480 
1481 	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1482 		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1483 	reta_conf->mask = ~0U;
1484 	return 0;
1485 }
1486 
1487 static int
1488 atl_rss_hash_update(struct rte_eth_dev *dev,
1489 				 struct rte_eth_rss_conf *rss_conf)
1490 {
1491 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1492 	struct aq_hw_cfg_s *cfg =
1493 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1494 	static u8 def_rss_key[40] = {
1495 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1496 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1497 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1498 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1499 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1500 	};
1501 
1502 	cfg->is_rss = !!rss_conf->rss_hf;
1503 	if (rss_conf->rss_key) {
1504 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1505 		       rss_conf->rss_key_len);
1506 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1507 	} else {
1508 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1509 		       sizeof(def_rss_key));
1510 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1511 	}
1512 
1513 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1514 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1515 	return 0;
1516 }
1517 
1518 static int
1519 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1520 				 struct rte_eth_rss_conf *rss_conf)
1521 {
1522 	struct aq_hw_cfg_s *cfg =
1523 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1524 
1525 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1526 	if (rss_conf->rss_key) {
1527 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1528 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1529 		       rss_conf->rss_key_len);
1530 	}
1531 
1532 	return 0;
1533 }
1534 
/* Register the PMD, its PCI id table, and kernel-module dependency. */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");

/* Constructor: register the driver's log types, defaulting to NOTICE.
 * rte_log_register() returns a negative value on failure, in which
 * case the level is left unset.
 */
RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}
1548 
1549