xref: /dpdk/drivers/net/atlantic/atl_ethdev.c (revision ce4e8d418097ebd128edca46f5a1e62e99bfc82f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4 
5 #include <rte_ethdev_pci.h>
6 
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
10 #include "atl_logs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
14 
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
17 
18 static int  atl_dev_configure(struct rte_eth_dev *dev);
19 static int  atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int  atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
30 
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 				    struct rte_eth_xstat_name *xstats_names,
33 				    unsigned int size);
34 
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 				struct rte_eth_stats *stats);
37 
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 			      struct rte_eth_xstat *stats, unsigned int n);
40 
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
42 
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
44 			      size_t fw_size);
45 
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 			       struct rte_eth_dev_info *dev_info);
48 
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
50 
51 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
52 
53 /* VLAN stuff */
54 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
55 		uint16_t vlan_id, int on);
56 
57 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
58 
59 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
60 				     uint16_t queue_id, int on);
61 
62 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
63 			     enum rte_vlan_type vlan_type, uint16_t tpid);
64 
65 /* EEPROM */
66 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
67 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
68 			      struct rte_dev_eeprom_info *eeprom);
69 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
70 			      struct rte_dev_eeprom_info *eeprom);
71 
72 /* Flow control */
73 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
74 			       struct rte_eth_fc_conf *fc_conf);
75 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
76 			       struct rte_eth_fc_conf *fc_conf);
77 
78 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
79 
80 /* Interrupts */
81 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
82 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
83 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
84 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
85 				    struct rte_intr_handle *handle);
86 static void atl_dev_interrupt_handler(void *param);
87 
88 
89 static int atl_add_mac_addr(struct rte_eth_dev *dev,
90 			    struct ether_addr *mac_addr,
91 			    uint32_t index, uint32_t pool);
92 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
93 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
94 					   struct ether_addr *mac_addr);
95 
96 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
97 				    struct ether_addr *mc_addr_set,
98 				    uint32_t nb_mc_addr);
99 
100 /* RSS */
101 static int atl_reta_update(struct rte_eth_dev *dev,
102 			     struct rte_eth_rss_reta_entry64 *reta_conf,
103 			     uint16_t reta_size);
104 static int atl_reta_query(struct rte_eth_dev *dev,
105 			    struct rte_eth_rss_reta_entry64 *reta_conf,
106 			    uint16_t reta_size);
107 static int atl_rss_hash_update(struct rte_eth_dev *dev,
108 				 struct rte_eth_rss_conf *rss_conf);
109 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
110 				   struct rte_eth_rss_conf *rss_conf);
111 
112 
113 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
114 	struct rte_pci_device *pci_dev);
115 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
116 
117 static void atl_dev_info_get(struct rte_eth_dev *dev,
118 				struct rte_eth_dev_info *dev_info);
119 
120 int atl_logtype_init;
121 int atl_logtype_driver;
122 
123 /*
124  * The set of PCI devices this driver supports
125  */
/* PCI device IDs claimed by this driver: Aquantia AQC1xx family and the
 * older D10x IDs, terminated by a zero vendor_id sentinel as required by
 * the rte_pci_id table convention.
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};
151 
/* PCI driver descriptor: requests BAR mapping, link-state-change
 * interrupt support, and IOVA-as-VA addressing from the EAL.
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
159 
/* Rx offload capabilities advertised in dev_info. */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME)

/* Tx offload capabilities advertised in dev_info. */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MULTI_SEGS)
172 
/* Rx descriptor ring limits reported to applications via dev_info. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

/* Tx descriptor ring limits, including per-packet segment caps. */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
186 
/* Expand to a { "name", offset } pair addressing a field of
 * struct aq_stats_s, so xstats can be read generically by offset.
 */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name) \
}

/* One extended-statistic descriptor: display name + byte offset
 * into the firmware statistics structure.
 */
struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
};

/* Extended statistics exposed via xstats_get/xstats_get_names.
 * Field semantics come from struct aq_stats_s (u/m/b = unicast/
 * multicast/broadcast, r/t = rx/tx — presumed from naming; see the
 * hw_atl headers for the authoritative definitions).
 */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
};
213 
/* ethdev callback table wired into every atlantic port at init time. */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	/* xstats share the same counters, so one reset handler serves both */
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	.rx_queue_count       = atl_rx_queue_count,
	.rx_descriptor_status = atl_dev_rx_descriptor_status,
	.tx_descriptor_status = atl_dev_tx_descriptor_status,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
291 
/* Reset the NIC through the B0 hardware-layer reset sequence.
 * Returns the hw_atl status code (0 on success).
 */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
297 
298 static inline void
299 atl_enable_intr(struct rte_eth_dev *dev)
300 {
301 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
302 
303 	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
304 }
305 
/* Mask every interrupt source in the lower interrupt-mask word. */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
312 
313 static int
314 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
315 {
316 	struct atl_adapter *adapter =
317 		(struct atl_adapter *)eth_dev->data->dev_private;
318 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
319 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
320 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
321 	int err = 0;
322 
323 	PMD_INIT_FUNC_TRACE();
324 
325 	eth_dev->dev_ops = &atl_eth_dev_ops;
326 	eth_dev->rx_pkt_burst = &atl_recv_pkts;
327 	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
328 	eth_dev->tx_pkt_prepare = &atl_prep_pkts;
329 
330 	/* For secondary processes, the primary process has done all the work */
331 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
332 		return 0;
333 
334 	/* Vendor and Device ID need to be set before init of shared code */
335 	hw->device_id = pci_dev->id.device_id;
336 	hw->vendor_id = pci_dev->id.vendor_id;
337 	hw->mmio = (void *)pci_dev->mem_resource[0].addr;
338 
339 	/* Hardware configuration - hardcode */
340 	adapter->hw_cfg.is_lro = false;
341 	adapter->hw_cfg.wol = false;
342 	adapter->hw_cfg.is_rss = false;
343 	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
344 
345 	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
346 			  AQ_NIC_RATE_5G |
347 			  AQ_NIC_RATE_2G5 |
348 			  AQ_NIC_RATE_1G |
349 			  AQ_NIC_RATE_100M;
350 
351 	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
352 	adapter->hw_cfg.aq_rss.indirection_table_size =
353 		HW_ATL_B0_RSS_REDIRECTION_MAX;
354 
355 	hw->aq_nic_cfg = &adapter->hw_cfg;
356 
357 	/* disable interrupt */
358 	atl_disable_intr(hw);
359 
360 	/* Allocate memory for storing MAC addresses */
361 	eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
362 	if (eth_dev->data->mac_addrs == NULL) {
363 		PMD_INIT_LOG(ERR, "MAC Malloc failed");
364 		return -ENOMEM;
365 	}
366 
367 	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
368 	if (err)
369 		return err;
370 
371 	/* Copy the permanent MAC address */
372 	if (hw->aq_fw_ops->get_mac_permanent(hw,
373 			eth_dev->data->mac_addrs->addr_bytes) != 0)
374 		return -EINVAL;
375 
376 	/* Reset the hw statistics */
377 	atl_dev_stats_reset(eth_dev);
378 
379 	rte_intr_callback_register(intr_handle,
380 				   atl_dev_interrupt_handler, eth_dev);
381 
382 	/* enable uio/vfio intr/eventfd mapping */
383 	rte_intr_enable(intr_handle);
384 
385 	/* enable support intr */
386 	atl_enable_intr(eth_dev);
387 
388 	return err;
389 }
390 
/*
 * Per-port teardown, primary process only: close the device if still
 * running, detach the burst handlers and ops, unhook the interrupt
 * callback, and release the MAC address storage.
 */
static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	/* only the primary process owns the hardware */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* stop the adapter first if it hasn't been stopped yet */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}
422 
/* PCI probe: allocate an ethdev with an atl_adapter private area and
 * run eth_atl_dev_init() on it.
 */
static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}
430 
/* PCI remove: tear the port down through eth_atl_dev_uninit(). */
static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}
436 
437 static int
438 atl_dev_configure(struct rte_eth_dev *dev)
439 {
440 	struct atl_interrupt *intr =
441 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
442 
443 	PMD_INIT_FUNC_TRACE();
444 
445 	/* set flag to update link status after init */
446 	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
447 
448 	return 0;
449 }
450 
451 /*
452  * Configure device link speed and setup link.
453  * It returns 0 on success.
454  */
455 static int
456 atl_dev_start(struct rte_eth_dev *dev)
457 {
458 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
459 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
460 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
461 	uint32_t intr_vector = 0;
462 	uint32_t *link_speeds;
463 	uint32_t speed = 0;
464 	int status;
465 	int err;
466 
467 	PMD_INIT_FUNC_TRACE();
468 
469 	/* set adapter started */
470 	hw->adapter_stopped = 0;
471 
472 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
473 		PMD_INIT_LOG(ERR,
474 		"Invalid link_speeds for port %u, fix speed not supported",
475 				dev->data->port_id);
476 		return -EINVAL;
477 	}
478 
479 	/* disable uio/vfio intr/eventfd mapping */
480 	rte_intr_disable(intr_handle);
481 
482 	/* reinitialize adapter
483 	 * this calls reset and start
484 	 */
485 	status = atl_reset_hw(hw);
486 	if (status != 0)
487 		return -EIO;
488 
489 	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
490 
491 	hw_atl_b0_hw_start(hw);
492 	/* check and configure queue intr-vector mapping */
493 	if ((rte_intr_cap_multiple(intr_handle) ||
494 	    !RTE_ETH_DEV_SRIOV(dev).active) &&
495 	    dev->data->dev_conf.intr_conf.rxq != 0) {
496 		intr_vector = dev->data->nb_rx_queues;
497 		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
498 			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
499 					ATL_MAX_INTR_QUEUE_NUM);
500 			return -ENOTSUP;
501 		}
502 		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
503 			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
504 			return -1;
505 		}
506 	}
507 
508 	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
509 		intr_handle->intr_vec = rte_zmalloc("intr_vec",
510 				    dev->data->nb_rx_queues * sizeof(int), 0);
511 		if (intr_handle->intr_vec == NULL) {
512 			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
513 				     " intr_vec", dev->data->nb_rx_queues);
514 			return -ENOMEM;
515 		}
516 	}
517 
518 	/* initialize transmission unit */
519 	atl_tx_init(dev);
520 
521 	/* This can fail when allocating mbufs for descriptor rings */
522 	err = atl_rx_init(dev);
523 	if (err) {
524 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
525 		goto error;
526 	}
527 
528 	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
529 		hw->fw_ver_actual >> 24,
530 		(hw->fw_ver_actual >> 16) & 0xFF,
531 		hw->fw_ver_actual & 0xFFFF);
532 	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
533 
534 	err = atl_start_queues(dev);
535 	if (err < 0) {
536 		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
537 		goto error;
538 	}
539 
540 	err = hw->aq_fw_ops->update_link_status(hw);
541 
542 	if (err)
543 		goto error;
544 
545 	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
546 
547 	link_speeds = &dev->data->dev_conf.link_speeds;
548 
549 	speed = 0x0;
550 
551 	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
552 		speed = hw->aq_nic_cfg->link_speed_msk;
553 	} else {
554 		if (*link_speeds & ETH_LINK_SPEED_10G)
555 			speed |= AQ_NIC_RATE_10G;
556 		if (*link_speeds & ETH_LINK_SPEED_5G)
557 			speed |= AQ_NIC_RATE_5G;
558 		if (*link_speeds & ETH_LINK_SPEED_1G)
559 			speed |= AQ_NIC_RATE_1G;
560 		if (*link_speeds & ETH_LINK_SPEED_2_5G)
561 			speed |=  AQ_NIC_RATE_2G5;
562 		if (*link_speeds & ETH_LINK_SPEED_100M)
563 			speed |= AQ_NIC_RATE_100M;
564 	}
565 
566 	err = hw->aq_fw_ops->set_link_speed(hw, speed);
567 	if (err)
568 		goto error;
569 
570 	if (rte_intr_allow_others(intr_handle)) {
571 		/* check if lsc interrupt is enabled */
572 		if (dev->data->dev_conf.intr_conf.lsc != 0)
573 			atl_dev_lsc_interrupt_setup(dev, true);
574 		else
575 			atl_dev_lsc_interrupt_setup(dev, false);
576 	} else {
577 		rte_intr_callback_unregister(intr_handle,
578 					     atl_dev_interrupt_handler, dev);
579 		if (dev->data->dev_conf.intr_conf.lsc != 0)
580 			PMD_INIT_LOG(INFO, "lsc won't enable because of"
581 				     " no intr multiplex");
582 	}
583 
584 	/* check if rxq interrupt is enabled */
585 	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
586 	    rte_intr_dp_is_en(intr_handle))
587 		atl_dev_rxq_interrupt_setup(dev);
588 
589 	/* enable uio/vfio intr/eventfd mapping */
590 	rte_intr_enable(intr_handle);
591 
592 	/* resume enabled intr since hw reset */
593 	atl_enable_intr(dev);
594 
595 	return 0;
596 
597 error:
598 	atl_stop_queues(dev);
599 	return -EIO;
600 }
601 
602 /*
603  * Stop device: disable rx and tx functions to allow for reconfiguring.
604  */
/*
 * dev_stop callback: mask interrupts, reset the NIC, stop the queues,
 * clear cached Rx/link state, and release the queue/vector mapping.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
646 
647 /*
648  * Set device link up: enable tx.
649  */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* re-advertise the full configured rate mask to bring the link up */
	return hw->aq_fw_ops->set_link_speed(hw,
			hw->aq_nic_cfg->link_speed_msk);
}
658 
659 /*
660  * Set device link down: disable tx.
661  */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* an empty rate mask tells firmware to take the link down */
	return hw->aq_fw_ops->set_link_speed(hw, 0);
}
669 
670 /*
671  * Reset and stop device.
672  */
/* dev_close callback: stop the device then release all Rx/Tx queues. */
static void
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);
}
682 
/* dev_reset callback: full software reset — uninit then re-init the port. */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int err = eth_atl_dev_uninit(dev);

	if (err != 0)
		return err;

	return eth_atl_dev_init(dev);
}
696 
697 
/*
 * stats_get callback: refresh firmware counters, then merge them with
 * the software-maintained per-queue counters into *stats.
 * Always returns 0.
 */
static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	/* pull fresh counters from firmware into hw->curr_stats */
	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	/* mbuf allocation failures are tracked in software only */
	stats->rx_nombuf = swstats->rx_nombuf;

	/* per-queue counters are maintained by the Rx/Tx burst code */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}
729 
/*
 * stats_reset / xstats_reset callback: refresh firmware counters first
 * (so the next update starts from a clean baseline), then zero both the
 * cached firmware totals and the software per-queue totals.
 */
static void
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
}
743 
744 static int
745 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
746 			 struct rte_eth_xstat_name *xstats_names,
747 			 unsigned int size)
748 {
749 	unsigned int i;
750 
751 	if (!xstats_names)
752 		return RTE_DIM(atl_xstats_tbl);
753 
754 	for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
755 		snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
756 			atl_xstats_tbl[i].name);
757 
758 	return size;
759 }
760 
761 static int
762 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
763 		   unsigned int n)
764 {
765 	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
766 	struct aq_hw_s *hw = &adapter->hw;
767 	unsigned int i;
768 
769 	if (!stats)
770 		return 0;
771 
772 	for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
773 		stats[i].id = i;
774 		stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
775 					atl_xstats_tbl[i].offset);
776 	}
777 
778 	return n;
779 }
780 
781 static int
782 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
783 {
784 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
785 	uint32_t fw_ver = 0;
786 	unsigned int ret = 0;
787 
788 	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
789 	if (ret)
790 		return -EIO;
791 
792 	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
793 		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
794 
795 	ret += 1; /* add string null-terminator */
796 
797 	if (fw_size < ret)
798 		return ret;
799 
800 	return 0;
801 }
802 
/*
 * dev_infos_get callback: report queue counts, frame-size limits,
 * offload capabilities, descriptor limits, RSS parameters, and the
 * supported link speeds.
 */
static void
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	/* no hash filtering or VMDq support on this device */
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;


	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	/* multi-gig rates: 100M / 1G / 2.5G / 5G / 10G */
	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= ETH_LINK_SPEED_5G;
}
845 
846 static const uint32_t *
847 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
848 {
849 	static const uint32_t ptypes[] = {
850 		RTE_PTYPE_L2_ETHER,
851 		RTE_PTYPE_L2_ETHER_ARP,
852 		RTE_PTYPE_L2_ETHER_VLAN,
853 		RTE_PTYPE_L3_IPV4,
854 		RTE_PTYPE_L3_IPV6,
855 		RTE_PTYPE_L4_TCP,
856 		RTE_PTYPE_L4_UDP,
857 		RTE_PTYPE_L4_SCTP,
858 		RTE_PTYPE_L4_ICMP,
859 		RTE_PTYPE_UNKNOWN
860 	};
861 
862 	if (dev->rx_pkt_burst == atl_recv_pkts)
863 		return ptypes;
864 
865 	return NULL;
866 }
867 
868 /* return 0 means link status changed, -1 means not changed */
/* return 0 means link status changed, -1 means not changed */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_eth_link link, old;
	int err = 0;

	/* start from a "link down" template */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	/* NOTE(review): a firmware read failure reports "changed" (0)
	 * without updating anything — presumably intentional so callers
	 * re-poll; confirm against the fw_ops contract.
	 */
	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
914 
915 static void
916 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
917 {
918 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
919 
920 	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
921 }
922 
923 static void
924 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
925 {
926 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
927 
928 	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
929 }
930 
931 static void
932 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
933 {
934 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
935 
936 	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
937 }
938 
/* Stop accepting all multicast frames — unless the port is promiscuous,
 * in which case all-multicast must stay effectively enabled.
 */
static void
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
}
949 
950 /**
951  * It clears the interrupt causes and enables the interrupt.
952  * It will be called once only during nic initialized.
953  *
954  * @param dev
955  *  Pointer to struct rte_eth_dev.
956  * @param on
957  *  Enable or Disable.
958  *
959  * @return
960  *  - On success, zero.
961  *  - On failure, a negative value.
962  */
963 
static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	/* no extra hardware setup is needed for LSC here — just log the
	 * current link state; `on` is intentionally unused
	 */
	atl_dev_link_status_print(dev);
	return 0;
}
970 
/* Rx queue interrupt setup: nothing to program on this hardware. */
static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
976 
977 
/*
 * Read and acknowledge the interrupt cause, mask further interrupts,
 * and record whether a link-change needs handling in intr->flags.
 */
static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	/* mask interrupts until atl_dev_interrupt_action() re-enables them */
	atl_disable_intr(hw);
	intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
			ATL_FLAG_NEED_LINK_UPDATE : 0;

	return 0;
}
994 
995 /**
996  * It gets and then prints the link status.
997  *
998  * @param dev
999  *  Pointer to struct rte_eth_dev.
1000  *
1001  * @return
1002  *  - On success, zero.
1003  *  - On failure, a negative value.
1004  */
/* Log the port's currently recorded link status (speed and duplex when
 * up); in DEBUG builds also log the PCI address.
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}


#ifdef DEBUG
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
#endif

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
}
1038 
/**
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
1049 static int
1050 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1051 			   struct rte_intr_handle *intr_handle)
1052 {
1053 	struct atl_interrupt *intr =
1054 		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1055 
1056 	if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1057 		atl_dev_link_update(dev, 0);
1058 		intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1059 		atl_dev_link_status_print(dev);
1060 		_rte_eth_dev_callback_process(dev,
1061 			RTE_ETH_EVENT_INTR_LSC, NULL);
1062 	}
1063 
1064 	atl_enable_intr(dev);
1065 	rte_intr_enable(intr_handle);
1066 
1067 	return 0;
1068 }
1069 
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
1082 static void
1083 atl_dev_interrupt_handler(void *param)
1084 {
1085 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1086 
1087 	atl_dev_interrupt_get_status(dev);
1088 	atl_dev_interrupt_action(dev, dev->intr_handle);
1089 }
1090 
/* Fixed SFP module EEPROM size (bytes) reported to applications. */
#define SFP_EEPROM_SIZE 0xff

/* Report the module EEPROM size; always SFP_EEPROM_SIZE for this PMD. */
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}
1098 
1099 static int
1100 atl_dev_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1101 {
1102 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1103 
1104 	if (hw->aq_fw_ops->get_eeprom == NULL)
1105 		return -ENOTSUP;
1106 
1107 	if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1108 		return -EINVAL;
1109 
1110 	return hw->aq_fw_ops->get_eeprom(hw, eeprom->data, eeprom->length);
1111 }
1112 
1113 static int
1114 atl_dev_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1115 {
1116 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1117 
1118 	if (hw->aq_fw_ops->set_eeprom == NULL)
1119 		return -ENOTSUP;
1120 
1121 	if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1122 		return -EINVAL;
1123 
1124 	return hw->aq_fw_ops->set_eeprom(hw, eeprom->data, eeprom->length);
1125 }
1126 
1127 static int
1128 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1129 {
1130 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1131 
1132 	if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1133 		fc_conf->mode = RTE_FC_NONE;
1134 	else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1135 		fc_conf->mode = RTE_FC_FULL;
1136 	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1137 		fc_conf->mode = RTE_FC_RX_PAUSE;
1138 	else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1139 		fc_conf->mode = RTE_FC_TX_PAUSE;
1140 
1141 	return 0;
1142 }
1143 
1144 static int
1145 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1146 {
1147 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1148 	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1149 
1150 
1151 	if (hw->aq_fw_ops->set_flow_control == NULL)
1152 		return -ENOTSUP;
1153 
1154 	if (fc_conf->mode == RTE_FC_NONE)
1155 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1156 	else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1157 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1158 	else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1159 		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1160 	else if (fc_conf->mode == RTE_FC_FULL)
1161 		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1162 
1163 	if (old_flow_control != hw->aq_nic_cfg->flow_control)
1164 		return hw->aq_fw_ops->set_flow_control(hw);
1165 
1166 	return 0;
1167 }
1168 
1169 static int
1170 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1171 		    u8 *mac_addr, bool enable)
1172 {
1173 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1174 	unsigned int h = 0U;
1175 	unsigned int l = 0U;
1176 	int err;
1177 
1178 	if (mac_addr) {
1179 		h = (mac_addr[0] << 8) | (mac_addr[1]);
1180 		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1181 			(mac_addr[4] << 8) | mac_addr[5];
1182 	}
1183 
1184 	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1185 	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1186 	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1187 
1188 	if (enable)
1189 		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1190 
1191 	err = aq_hw_err_from_flags(hw);
1192 
1193 	return err;
1194 }
1195 
1196 static int
1197 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1198 			uint32_t index __rte_unused, uint32_t pool __rte_unused)
1199 {
1200 	if (is_zero_ether_addr(mac_addr)) {
1201 		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1202 		return -EINVAL;
1203 	}
1204 
1205 	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1206 }
1207 
/* Clear the unicast MAC filter at 'index' and leave it disabled.
 * HW errors from atl_update_mac_addr() are ignored (void callback).
 */
static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}
1213 
/* Replace the default (index 0) MAC address.
 * Previously the result of atl_add_mac_addr() was discarded and 0 was
 * returned unconditionally; the error is now propagated to the caller.
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1221 
1222 static int
1223 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1224 {
1225 	struct rte_eth_dev_info dev_info;
1226 	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1227 
1228 	atl_dev_info_get(dev, &dev_info);
1229 
1230 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1231 		return -EINVAL;
1232 
1233 	/* update max frame size */
1234 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1235 
1236 	return 0;
1237 }
1238 
/**
 * Add or remove a VLAN ID in the per-port VLAN filter table.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param vlan_id
 *  VLAN ID to filter.
 * @param on
 *  Non-zero to add the filter, zero to remove it.
 *
 * @return
 *  - 0 on success (including the no-op cases).
 *  - -ENOMEM when all HW_ATL_B0_MAX_VLAN_IDS slots are occupied.
 */
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Look for an existing slot already holding this VLAN ID. */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exist, or already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter slot for the new VLAN_ID.
	 * NOTE(review): slot value 0 marks "free", so VLAN ID 0 itself
	 * cannot be stored in the table — confirm this is intended.
	 */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID */
		err = -ENOMEM;
		goto exit;
	}

	/* Program and enable the newly claimed slot. */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if vlan_filter empty  */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
1299 
1300 static int
1301 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1302 {
1303 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1304 	struct aq_hw_cfg_s *cfg =
1305 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1306 	int i;
1307 
1308 	PMD_INIT_FUNC_TRACE();
1309 
1310 	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1311 		if (cfg->vlan_filter[i])
1312 			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1313 	}
1314 	return 0;
1315 }
1316 
1317 static int
1318 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1319 {
1320 	struct aq_hw_cfg_s *cfg =
1321 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1322 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1323 	int ret = 0;
1324 	int i;
1325 
1326 	PMD_INIT_FUNC_TRACE();
1327 
1328 	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1329 
1330 	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1331 
1332 	for (i = 0; i < dev->data->nb_rx_queues; i++)
1333 		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1334 
1335 	if (mask & ETH_VLAN_EXTEND_MASK)
1336 		ret = -ENOTSUP;
1337 
1338 	return ret;
1339 }
1340 
1341 static int
1342 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1343 		  uint16_t tpid)
1344 {
1345 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1346 	int err = 0;
1347 
1348 	PMD_INIT_FUNC_TRACE();
1349 
1350 	switch (vlan_type) {
1351 	case ETH_VLAN_TYPE_INNER:
1352 		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1353 		break;
1354 	case ETH_VLAN_TYPE_OUTER:
1355 		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1356 		break;
1357 	default:
1358 		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1359 		err = -ENOTSUP;
1360 	}
1361 
1362 	return err;
1363 }
1364 
1365 static void
1366 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1367 {
1368 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1369 
1370 	PMD_INIT_FUNC_TRACE();
1371 
1372 	if (queue_id > dev->data->nb_rx_queues) {
1373 		PMD_DRV_LOG(ERR, "Invalid queue id");
1374 		return;
1375 	}
1376 
1377 	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1378 }
1379 
1380 static int
1381 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1382 			  struct ether_addr *mc_addr_set,
1383 			  uint32_t nb_mc_addr)
1384 {
1385 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1386 	u32 i;
1387 
1388 	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1389 		return -EINVAL;
1390 
1391 	/* Update whole uc filters table */
1392 	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1393 		u8 *mac_addr = NULL;
1394 		u32 l = 0, h = 0;
1395 
1396 		if (i < nb_mc_addr) {
1397 			mac_addr = mc_addr_set[i].addr_bytes;
1398 			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1399 				(mac_addr[4] << 8) | mac_addr[5];
1400 			h = (mac_addr[0] << 8) | mac_addr[1];
1401 		}
1402 
1403 		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1404 		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1405 							HW_ATL_B0_MAC_MIN + i);
1406 		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1407 							HW_ATL_B0_MAC_MIN + i);
1408 		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1409 					   HW_ATL_B0_MAC_MIN + i);
1410 	}
1411 
1412 	return 0;
1413 }
1414 
/**
 * Program the RSS redirection (RETA) table.
 *
 * Each requested entry is clamped to the last Rx queue index so the
 * table never points at a non-existent queue, then the whole cached
 * RSS state is pushed to hardware.
 *
 * NOTE(review): entries are read from reta_conf->reta[i] only, i.e. the
 * first rte_eth_rss_reta_entry64 group, and reta_conf->mask is not
 * consulted.  That is only correct while the HW indirection table size
 * does not exceed one group (64 entries) — confirm.
 *
 * @return
 *  Always zero.
 */
static int
atl_reta_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}
1431 
/**
 * Return the cached RSS redirection (RETA) table.
 *
 * NOTE(review): entries are written to reta_conf->reta[i] and the mask
 * is set on the first rte_eth_rss_reta_entry64 group only; like
 * atl_reta_update(), this assumes the table fits in one 64-entry
 * group — confirm.
 *
 * @return
 *  Always zero.
 */
static int
atl_reta_query(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}
1445 
1446 static int
1447 atl_rss_hash_update(struct rte_eth_dev *dev,
1448 				 struct rte_eth_rss_conf *rss_conf)
1449 {
1450 	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1451 	struct aq_hw_cfg_s *cfg =
1452 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1453 	static u8 def_rss_key[40] = {
1454 		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1455 		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1456 		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1457 		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1458 		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1459 	};
1460 
1461 	cfg->is_rss = !!rss_conf->rss_hf;
1462 	if (rss_conf->rss_key) {
1463 		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1464 		       rss_conf->rss_key_len);
1465 		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1466 	} else {
1467 		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1468 		       sizeof(def_rss_key));
1469 		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1470 	}
1471 
1472 	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1473 	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1474 	return 0;
1475 }
1476 
1477 static int
1478 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1479 				 struct rte_eth_rss_conf *rss_conf)
1480 {
1481 	struct aq_hw_cfg_s *cfg =
1482 		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1483 
1484 	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1485 	if (rss_conf->rss_key) {
1486 		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1487 		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1488 		       rss_conf->rss_key_len);
1489 	}
1490 
1491 	return 0;
1492 }
1493 
/* Register the PMD with the PCI bus, publish its PCI ID table, and
 * declare the kernel-module dependency for device binding.
 */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1497 
/* Constructor: register the driver's two log types and default both to
 * NOTICE.  rte_log_register() returns a negative value on failure, in
 * which case the level is left unset.
 */
RTE_INIT(atl_init_log)
{
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);
}
1507 
1508