/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_string_fns.h>
#include <ethdev_pci.h>
#include <rte_alarm.h>

#include "atl_ethdev.h"
#include "atl_common.h"
#include "atl_hw_regs.h"
#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
static int  atl_dev_configure(struct rte_eth_dev *dev);
static int  atl_dev_start(struct rte_eth_dev *dev);
static int atl_dev_stop(struct rte_eth_dev *dev);
static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
static int  atl_dev_close(struct rte_eth_dev *dev);
static int  atl_dev_reset(struct rte_eth_dev *dev);
static int  atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int  atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);

static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);

static int atl_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);

static int atl_dev_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *stats, unsigned int n);

static int atl_dev_stats_reset(struct rte_eth_dev *dev);

static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);

static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev,
						    size_t *no_of_elements);

static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

/* VLAN stuff */
static int atl_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);

static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
				     uint16_t queue_id, int on);

static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type, uint16_t tpid);

/* EEPROM */
static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);
static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
			      struct rte_dev_eeprom_info *eeprom);

/* Regs */
static int atl_dev_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

/* Flow control */
static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);

static void atl_dev_link_status_print(struct rte_eth_dev *dev);

/* Interrupts */
static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void atl_dev_interrupt_handler(void *param);


static int atl_add_mac_addr(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr,
			    uint32_t index, uint32_t pool);
static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
					   struct rte_ether_addr *mac_addr);

static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);

/* RSS */
static int atl_reta_update(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size);
static int atl_reta_query(struct rte_eth_dev *dev,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size);
static int atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);


static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev);
static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);

static int atl_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};

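/*
 * RTE_PCI_DRV_NEED_MAPPING asks EAL to map the device BARs before probe;
 * RTE_PCI_DRV_INTR_LSC advertises link-status-change interrupt support.
 */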
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};

#define ATL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_STRIP \
			| RTE_ETH_RX_OFFLOAD_IPV4_CKSUM \
			| RTE_ETH_RX_OFFLOAD_UDP_CKSUM \
			| RTE_ETH_RX_OFFLOAD_TCP_CKSUM \
			| RTE_ETH_RX_OFFLOAD_MACSEC_STRIP \
			| RTE_ETH_RX_OFFLOAD_VLAN_FILTER)

#define ATL_TX_OFFLOADS (RTE_ETH_TX_OFFLOAD_VLAN_INSERT \
			| RTE_ETH_TX_OFFLOAD_IPV4_CKSUM \
			| RTE_ETH_TX_OFFLOAD_UDP_CKSUM \
			| RTE_ETH_TX_OFFLOAD_TCP_CKSUM \
			| RTE_ETH_TX_OFFLOAD_TCP_TSO \
			| RTE_ETH_TX_OFFLOAD_MACSEC_INSERT \
			| RTE_ETH_TX_OFFLOAD_MULTI_SEGS)

#define SFP_EEPROM_SIZE 0x100

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};

enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,
	XSTATS_TYPE_MACSEC,
};

#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

struct atl_xstats_tbl_s {
	const char *name;
	unsigned int offset;
	enum atl_xstats_type type;
};

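/*
 * Each entry pairs a counter name with its byte offset inside either
 * struct aq_stats_s (MSM counters) or struct macsec_stats (MACsec
 * counters), plus a type tag. atl_dev_xstats_get_names() walks this table
 * to report the names; atl_dev_xstats_get() adds the offset to the base of
 * the matching stats structure to fetch each 64-bit counter value.
 */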
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};

static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure	      = atl_dev_configure,
	.dev_start	      = atl_dev_start,
	.dev_stop	      = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close	      = atl_dev_close,
	.dev_reset	      = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update	      = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats */
	.stats_get	      = atl_dev_stats_get,
	.xstats_get	      = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset	      = atl_dev_stats_reset,
	.xstats_reset	      = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get	      = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start	      = atl_rx_queue_start,
	.rx_queue_stop	      = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start	      = atl_tx_queue_start,
	.tx_queue_stop	      = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get	      = atl_flow_ctrl_get,
	.flow_ctrl_set	      = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add	      = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set	      = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get	      = atl_rxq_info_get,
	.txq_info_get	      = atl_txq_info_get,

	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};

static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}

static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}

static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}

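/*
 * Per-port init. The ops and burst/status callbacks are set in every
 * process; hardware setup (MMIO base, hardcoded configuration, firmware
 * ops discovery, permanent MAC readout, stats reset and interrupt
 * registration) is performed by the primary process only.
 */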
static int
eth_atl_dev_init(struct rte_eth_dev *eth_dev)
{
	struct atl_adapter *adapter = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &atl_eth_dev_ops;

	eth_dev->rx_queue_count       = atl_rx_queue_count;
	eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;

	eth_dev->rx_pkt_burst = &atl_recv_pkts;
	eth_dev->tx_pkt_burst = &atl_xmit_pkts;
	eth_dev->tx_pkt_prepare = &atl_prep_pkts;

	/* For secondary processes, the primary process has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->mmio = (void *)pci_dev->mem_resource[0].addr;

	/* Hardware configuration - hardcoded defaults */
	adapter->hw_cfg.is_lro = false;
	adapter->hw_cfg.wol = false;
	adapter->hw_cfg.is_rss = false;
	adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;

	adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
			  AQ_NIC_RATE_5G |
			  AQ_NIC_RATE_2G5 |
			  AQ_NIC_RATE_1G |
			  AQ_NIC_RATE_100M;

	adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
	adapter->hw_cfg.aq_rss.indirection_table_size =
		HW_ATL_B0_RSS_REDIRECTION_MAX;

	hw->aq_nic_cfg = &adapter->hw_cfg;

	pthread_mutex_init(&hw->mbox_mutex, NULL);

	/* disable interrupt */
	atl_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
					RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "MAC Malloc failed");
		return -ENOMEM;
	}

	err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
	if (err)
		return err;

	/* Copy the permanent MAC address */
	if (hw->aq_fw_ops->get_mac_permanent(hw,
			eth_dev->data->mac_addrs->addr_bytes) != 0)
		return -EINVAL;

	/* Reset the hw statistics */
	atl_dev_stats_reset(eth_dev);

	rte_intr_callback_register(intr_handle,
				   atl_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable supported interrupts */
	atl_enable_intr(eth_dev);

	return err;
}

static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}

static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, atl_dev_close);
}

static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/*
 * Configure device link speed and set up the link.
 * Returns 0 on success.
 */
static int
atl_dev_start(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int status;
	int err;

	PMD_INIT_FUNC_TRACE();

	/* set adapter started */
	hw->adapter_stopped = 0;

	if (dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fix speed not supported",
				dev->data->port_id);
		return -EINVAL;
	}

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Reinitialize the adapter; this performs a reset followed by
	 * a start.
	 */
	status = atl_reset_hw(hw);
	if (status != 0)
		return -EIO;

	err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);

	hw_atl_b0_hw_start(hw);
	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	    !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					ATL_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector)) {
			PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
			return -1;
		}
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
						   dev->data->nb_rx_queues)) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* initialize transmission unit */
	atl_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = atl_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
		hw->fw_ver_actual >> 24,
		(hw->fw_ver_actual >> 16) & 0xFF,
		hw->fw_ver_actual & 0xFFFF);
	PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);

	err = atl_start_queues(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = atl_dev_set_link_up(dev);

	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		goto error;

	dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			atl_dev_lsc_interrupt_setup(dev, true);
		else
			atl_dev_lsc_interrupt_setup(dev, false);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     atl_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		atl_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* re-enable interrupts, since the hw reset masked them */
	atl_enable_intr(dev);

	return 0;

error:
	atl_stop_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static int
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* revert to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	return 0;
}

/*
 * Set device link up: enable tx.
 */
static int
atl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t link_speeds = dev->data->dev_conf.link_speeds;
	uint32_t speed_mask = 0;

	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed_mask = hw->aq_nic_cfg->link_speed_msk;
	} else {
		if (link_speeds & RTE_ETH_LINK_SPEED_10G)
			speed_mask |= AQ_NIC_RATE_10G;
		if (link_speeds & RTE_ETH_LINK_SPEED_5G)
			speed_mask |= AQ_NIC_RATE_5G;
		if (link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed_mask |= AQ_NIC_RATE_1G;
		if (link_speeds & RTE_ETH_LINK_SPEED_2_5G)
			speed_mask |= AQ_NIC_RATE_2G5;
		if (link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed_mask |= AQ_NIC_RATE_100M;
	}

	return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
}

/*
 * Set device link down: disable tx.
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}

/*
 * Reset and stop device.
 */
static int
atl_dev_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct aq_hw_s *hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = atl_dev_stop(dev);

	atl_free_queues(dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, dev);

	pthread_mutex_destroy(&hw->mbox_mutex);

	return ret;
}

static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = atl_dev_close(dev);
	if (ret)
		return ret;

	ret = eth_atl_dev_init(dev);

	return ret;
}

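/*
 * Push the MACsec configuration accumulated in aq_macsec_config down to
 * firmware as a sequence of mailbox requests: global enable, TX SC,
 * RX SC, TX SA and RX SA. Each step is validated via response.result.
 */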
static int
atl_dev_configure_macsec(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_macsec_config *aqcfg = &cf->aq_macsec;
	struct macsec_msg_fw_request msg_macsec;
	struct macsec_msg_fw_response response;

	if (!aqcfg->common.macsec_enabled ||
	    hw->aq_fw_ops->send_macsec_req == NULL)
		return 0;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Create the set of SC/SA structures from parameters provided by DPDK */

	/* Configure macsec */
	msg_macsec.msg_type = macsec_cfg_msg;
	msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
	msg_macsec.cfg.interrupts_enabled = 1;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SC */

	msg_macsec.msg_type = macsec_add_tx_sc_msg;
	msg_macsec.txsc.index = 0; /* only one TX SC is used */
	msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;

	/* MAC addr for TX */
	msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
	msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
	msg_macsec.txsc.sa_mask = 0x3f;

	msg_macsec.txsc.da_mask = 0;
	msg_macsec.txsc.tci = 0x0B;
	msg_macsec.txsc.curr_an = 0; /* SA index currently in use */

	/*
	 * Create the SCI (Secure Channel Identifier).
	 * The SCI is constructed from the source MAC address and the
	 * port identifier.
	 */
	uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
			       (msg_macsec.txsc.mac_sa[0] >> 16);
	uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);

	uint32_t port_identifier = 1;

	msg_macsec.txsc.sci[1] = sci_hi_part;
	msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SC */

	msg_macsec.msg_type = macsec_add_rx_sc_msg;
	msg_macsec.rxsc.index = aqcfg->rxsc.pi;
	msg_macsec.rxsc.replay_protect =
		aqcfg->common.replay_protection_enabled;
	msg_macsec.rxsc.anti_replay_window = 0;

	/* MAC addr for RX */
	msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
	msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
	msg_macsec.rxsc.da_mask = 0; /* 0x3f */

	msg_macsec.rxsc.sa_mask = 0;

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure TX SA */

	msg_macsec.msg_type = macsec_add_tx_sa_msg;
	msg_macsec.txsa.index = aqcfg->txsa.idx;
	msg_macsec.txsa.next_pn = aqcfg->txsa.pn;

	msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
	msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
	msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
	msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	memset(&msg_macsec, 0, sizeof(msg_macsec));

	/* Configure RX SA */

	msg_macsec.msg_type = macsec_add_rx_sa_msg;
	msg_macsec.rxsa.index = aqcfg->rxsa.idx;
	msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;

	msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
	msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
	msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
	msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);

	hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);

	if (response.result)
		return -1;

	return 0;
}

int atl_macsec_enable(struct rte_eth_dev *dev,
		      uint8_t encr, uint8_t repl_prot)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 1;
	cfg->aq_macsec.common.encryption_enabled = encr;
	cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

	return 0;
}

int atl_macsec_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.common.macsec_enabled = 0;

	return 0;
}

int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);

	return 0;
}

int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
			   uint8_t *mac, uint16_t pi)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
	memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
		RTE_ETHER_ADDR_LEN);
	cfg->aq_macsec.rxsc.pi = pi;

	return 0;
}

int atl_macsec_select_txsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.txsa.idx = idx;
	cfg->aq_macsec.txsa.pn = pn;
	cfg->aq_macsec.txsa.an = an;

	memcpy(&cfg->aq_macsec.txsa.key, key, 16);
	return 0;
}

int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
			   uint8_t idx, uint8_t an,
			   uint32_t pn, uint8_t *key)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	cfg->aq_macsec.rxsa.idx = idx;
	cfg->aq_macsec.rxsa.pn = pn;
	cfg->aq_macsec.rxsa.an = an;

	memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
	return 0;
}

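/*
 * Basic stats: the packet/byte/error totals come from the firmware-updated
 * DMA counters in hw->curr_stats; per-queue counters and rx_nombuf are
 * software counters maintained by the PMD datapath.
 */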
static int
atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;
	struct atl_sw_stats *swstats = &adapter->sw_stats;
	unsigned int i;

	hw->aq_fw_ops->update_stats(hw);

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw->curr_stats.dma_pkt_rc;
	stats->ibytes = hw->curr_stats.dma_oct_rc;
	stats->imissed = hw->curr_stats.dpc;
	stats->ierrors = hw->curr_stats.erpt;

	stats->opackets = hw->curr_stats.dma_pkt_tc;
	stats->obytes = hw->curr_stats.dma_oct_tc;
	stats->oerrors = 0;

	stats->rx_nombuf = swstats->rx_nombuf;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		stats->q_ipackets[i] = swstats->q_ipackets[i];
		stats->q_opackets[i] = swstats->q_opackets[i];
		stats->q_ibytes[i] = swstats->q_ibytes[i];
		stats->q_obytes[i] = swstats->q_obytes[i];
		stats->q_errors[i] = swstats->q_errors[i];
	}
	return 0;
}

static int
atl_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
	struct aq_hw_s *hw = &adapter->hw;

	hw->aq_fw_ops->update_stats(hw);

	/* Reset software totals */
	memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));

	memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));

	return 0;
}

static int
atl_dev_xstats_get_count(struct rte_eth_dev *dev)
{
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;

	struct aq_hw_s *hw = &adapter->hw;
	unsigned int i, count = 0;

	for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
		if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
			((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
			continue;

		count++;
	}

	return count;
}

static int
atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned int size)
{
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (xstats_names) {
		for (i = 0; i < size && i < count; i++) {
			snprintf(xstats_names[i].name,
				RTE_ETH_XSTATS_NAME_SIZE, "%s",
				atl_xstats_tbl[i].name);
		}
	}

	return count;
}

static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		   unsigned int n)
{
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };
	int err = -1;
	unsigned int i;
	unsigned int count = atl_dev_xstats_get_count(dev);

	if (!stats)
		return count;

	if (hw->aq_fw_ops->send_macsec_req != NULL) {
		req.ingress_sa_index = 0xff;
		req.egress_sc_index = 0xff;
		req.egress_sa_index = 0xff;

		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
	}

	for (i = 0; i < n && i < count; i++) {
		stats[i].id = i;

		switch (atl_xstats_tbl[i].type) {
		case XSTATS_TYPE_MSM:
			stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
					 atl_xstats_tbl[i].offset);
			break;
		case XSTATS_TYPE_MACSEC:
			if (!err) {
				stats[i].value =
					*(u64 *)((uint8_t *)&resp.stats +
					atl_xstats_tbl[i].offset);
			}
			break;
		}
	}

	return i;
}

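/*
 * The firmware version word is packed as major:8 | minor:8 | build:16
 * ("build" naming is an assumption based on the format string below).
 * Per the ethdev fw_version_get convention, if the caller's buffer is too
 * small the required size (including the terminating NUL) is returned
 * instead of 0.
 */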
static int
atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fw_ver = 0;
	int ret = 0;

	ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
	if (ret)
		return -EIO;

	ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
		       (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add string null-terminator */
	if (fw_size < (size_t)ret)
		return ret;

	return 0;
}

static int
atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
	dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;

	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
	dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vmdq_pools = 0;
	dev_info->vmdq_queue_num = 0;

	dev_info->rx_offload_capa = ATL_RX_OFFLOADS;

	dev_info->tx_offload_capa = ATL_TX_OFFLOADS;


	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
	dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
	dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_2_5G;
	dev_info->speed_capa |= RTE_ETH_LINK_SPEED_5G;

	return 0;
}

static const uint32_t *
atl_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
	};

	if (dev->rx_pkt_burst == atl_recv_pkts) {
		*no_of_elements = RTE_DIM(ptypes);
		return ptypes;
	}

	return NULL;
}

static void
atl_dev_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_configure_macsec(dev);
}


/* return 0 means link status changed, -1 means not changed */
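/*
 * On a link-up transition the FW is queried for the negotiated flow
 * control mode, which is then applied to the RX block, and MACsec
 * reconfiguration is scheduled one second later via an EAL alarm.
 */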
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}

static int
atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, true);

	return 0;
}

static int
atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2promiscuous_mode_en_set(hw, false);

	return 0;
}

static int
atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);

	return 0;
}

static int
atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);

	return 0;
}

/**
 * It clears the interrupt causes and enables the interrupt.
 * It is called only once during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */

static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	atl_dev_link_status_print(dev);
	return 0;
}

static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}


static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	hw_atl_b0_hw_irq_read(hw, &cause);

	atl_disable_intr(hw);

	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
atl_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}


#ifdef DEBUG
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
#endif

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
}

/*
 * Executes link_update once an interrupt has occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter = dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
		goto done;

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	} else {
		if (hw->aq_fw_ops->send_macsec_req == NULL)
			goto done;

		/* Check whether the MACsec keys have expired */
		struct get_stats req = { 0 };
		struct macsec_msg_fw_request msg = { 0 };
		struct macsec_msg_fw_response resp = { 0 };

		req.ingress_sa_index = 0x0;
		req.egress_sc_index = 0x0;
		req.egress_sa_index = 0x0;
		msg.msg_type = macsec_get_stats_msg;
		msg.stats = req;

		int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		if (err) {
			PMD_DRV_LOG(ERR, "send_macsec_req fail");
			goto done;
		}
		if (resp.stats.egress_threshold_expired ||
		    resp.stats.ingress_threshold_expired ||
		    resp.stats.egress_expired ||
		    resp.stats.ingress_expired) {
			PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
			rte_eth_dev_callback_process(dev,
				RTE_ETH_EVENT_MACSEC, NULL);
		}
	}
done:
	atl_enable_intr(dev);
	rte_intr_ack(intr_handle);

	return 0;
}

/**
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
atl_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
}


static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	return SFP_EEPROM_SIZE;
}

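/*
 * The eeprom->magic field carries the SMBus device address of the SFP
 * module; zero selects the default SMBUS_DEVICE_ID, and only 7-bit
 * addresses (<= 0x7F) are accepted.
 */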
int atl_dev_get_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->get_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

int atl_dev_set_eeprom(struct rte_eth_dev *dev,
		       struct rte_dev_eeprom_info *eeprom)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t dev_addr = SMBUS_DEVICE_ID;

	if (hw->aq_fw_ops->set_eeprom == NULL)
		return -ENOTSUP;

	if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
	    eeprom->data == NULL)
		return -EINVAL;

	if (eeprom->magic > 0x7F)
		return -EINVAL;

	if (eeprom->magic)
		dev_addr = eeprom->magic;

	return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
					 eeprom->length, eeprom->offset);
}

static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}

static int
atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fc = AQ_NIC_FC_OFF;

	if (hw->aq_fw_ops->get_flow_control == NULL)
		return -ENOTSUP;

	hw->aq_fw_ops->get_flow_control(hw, &fc);

	if (fc == AQ_NIC_FC_OFF)
		fc_conf->mode = RTE_ETH_FC_NONE;
	else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (fc & AQ_NIC_FC_RX)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else if (fc & AQ_NIC_FC_TX)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;

	return 0;
}

static int
atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;


	if (hw->aq_fw_ops->set_flow_control == NULL)
		return -ENOTSUP;

	if (fc_conf->mode == RTE_ETH_FC_NONE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
	else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
	else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
		hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
	else if (fc_conf->mode == RTE_ETH_FC_FULL)
		hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);

	if (old_flow_control != hw->aq_nic_cfg->flow_control)
		return hw->aq_fw_ops->set_flow_control(hw);

	return 0;
}

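/*
 * Program a unicast L2 filter slot: the MAC is split into its two high
 * bytes (MSW) and four low bytes (LSW). The filter is disabled while the
 * registers are updated and re-enabled only if requested.
 */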
static int
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;
	int err;

	if (mac_addr) {
		h = (mac_addr[0] << 8) | (mac_addr[1]);
		l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
			(mac_addr[4] << 8) | mac_addr[5];
	}

	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	if (enable)
		hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

	return err;
}

static int
atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
			uint32_t index __rte_unused, uint32_t pool __rte_unused)
{
	if (rte_is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
		return -EINVAL;
	}

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
}

static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}

static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
	return 0;
}

static int
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	int ret;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	ret = atl_dev_info_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

	return 0;
}

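/*
 * Maintain the VLAN filter table: look the VLAN_ID up in the software
 * copy; on removal, disable and clear the matching slot; on addition,
 * program the first free slot. VLAN promiscuous mode is enabled whenever
 * the table ends up empty.
 */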
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found, so there is nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exists, or was just removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free VLAN filter slot for the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* No free VLAN filter slot is left for the new VLAN_ID */
		err = -ENOMEM;
		goto exit;
	}

	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if the vlan_filter table is empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}

static int
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
	}
	return 0;
}

static int
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	int i;

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & RTE_ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & RTE_ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	if (mask & RTE_ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
}

static int
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
		err = -ENOTSUP;
	}

	return err;
}

static void
atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* valid queue ids are 0..nb_rx_queues-1 */
	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue id");
		return;
	}

	hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
}

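/*
 * The multicast list is programmed into the unicast L2 filter slots
 * starting at HW_ATL_B0_MAC_MIN; slots beyond the supplied list are
 * disabled.
 */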
static int
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 i;

	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
		return -EINVAL;

	/* Update the whole UC filter table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;
		u32 l = 0, h = 0;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];
		}

		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
	}

	return 0;
}

static int
atl_reta_update(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	int i;
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
					dev->data->nb_rx_queues - 1);

	hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
	return 0;
}

static int
atl_reta_query(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	int i;
	struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
		reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
	reta_conf->mask = ~0U;
	return 0;
}

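/*
 * If the caller supplies no hash key, a built-in 40-byte default key is
 * programmed instead. Both the key and the indirection table are written
 * to hardware immediately.
 */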
static int
atl_rss_hash_update(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	static u8 def_rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	cfg->is_rss = !!rss_conf->rss_hf;
	if (rss_conf->rss_key) {
		memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);
		cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
	} else {
		memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
		       sizeof(def_rss_key));
		cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
	}

	hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
	return 0;
}

static int
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
	}

	return 0;
}

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}

RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
RTE_LOG_REGISTER_SUFFIX(atl_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(atl_logtype_driver, driver, NOTICE);
1914