/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <dev_driver.h>

#include "cxgbe.h"
#include "cxgbe_pfvf.h"
#include "cxgbe_flow.h"

/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ .vendor_id = 0, } \
	}
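
/*
 * Including base/t4_pci_id_tbl.h below, with the CH_PCI_* macros defined
 * above, expands to roughly the following table (the actual device IDs
 * come from that header):
 *
 *	static const struct rte_pci_id cxgb4_pci_tbl[] = {
 *		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, <devid>) },
 *		...
 *		{ .vendor_id = 0, }
 *	};
 */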

/*
 * ... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"

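/*
 * Transmit burst handler: under the Tx queue lock, reclaim descriptors of
 * already completed work, then hand each mbuf to t4_eth_xmit() (prefetching
 * the next packet as it goes) until all packets are queued or t4_eth_xmit()
 * reports an error. Returns the number of packets accepted.
 */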
uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
	uint16_t pkts_sent, pkts_remain;
	uint16_t total_sent = 0;
	uint16_t idx = 0;
	int ret = 0;

	t4_os_lock(&txq->txq_lock);
	/* free up desc from already completed tx */
	reclaim_completed_tx(&txq->q);
	if (unlikely(!nb_pkts))
		goto out_unlock;

	rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
	while (total_sent < nb_pkts) {
		pkts_remain = nb_pkts - total_sent;

		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
			idx = total_sent + pkts_sent;
			if ((idx + 1) < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
							volatile void *));
			ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
			if (ret < 0)
				break;
		}
		if (!pkts_sent)
			break;
		total_sent += pkts_sent;
		/* reclaim as much as possible */
		reclaim_completed_tx(&txq->q);
	}

out_unlock:
	t4_os_unlock(&txq->txq_lock);
	return total_sent;
}

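/*
 * Receive burst handler: poll the queue's response ring via cxgbe_poll()
 * for up to nb_pkts packets and return the number actually received.
 */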
uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
	unsigned int work_done;

	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
		dev_err(adapter, "error in cxgbe poll\n");

	return work_done;
}

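/*
 * Report device capabilities (queue counts, offload flags, descriptor
 * limits, RSS parameters and supported link speeds) to the ethdev layer.
 */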
int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
			struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = adapter->sge.max_ethqsets;
	device_info->max_tx_queues = adapter->sge.max_ethqsets;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	device_info->rx_queue_offload_capa = 0UL;
	device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

	device_info->tx_queue_offload_capa = 0UL;
	device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

	device_info->reta_size = pi->rss_size;
	device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
	device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
	cxgbe_get_speed_caps(pi, &device_info->speed_capa);

	return 0;
}

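/*
 * The Rx-mode handlers below (promiscuous and all-multicast enable/disable)
 * program the virtual interface through t4_set_rxmode(). Assuming the base
 * code's usual argument order after the VI id (mtu, promiscuous,
 * all-multicast, broadcast, VLAN extraction), a value of -1 leaves that
 * setting unchanged while 0/1 disables/enables it.
 */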
int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int ret;

	if (adapter->params.rawf_size != 0) {
		ret = cxgbe_mpstcam_rawf_enable(pi);
		if (ret < 0)
			return ret;
	}

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     1, -1, 1, -1, false);
}

int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int ret;

	if (adapter->params.rawf_size != 0) {
		ret = cxgbe_mpstcam_rawf_disable(pi);
		if (ret < 0)
			return ret;
	}

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     0, -1, 1, -1, false);
}

int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     -1, 1, 1, -1, false);
}

int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     -1, 0, 1, -1, false);
}

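/*
 * Poll the firmware event queue for link state changes and report the
 * current link status, speed, duplex and autonegotiation setting to the
 * ethdev layer.
 */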
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
			  int wait_to_complete)
{
	struct port_info *pi = eth_dev->data->dev_private;
	unsigned int i, work_done, budget = 32;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *adapter = pi->adapter;
	u8 old_link = pi->link_cfg.link_ok;
	struct sge *s = &adapter->sge;
	struct rte_eth_link new_link;

	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
		if (!s->fw_evtq.desc)
			break;

		cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

		/* Exit if link status changed or always forced up */
		if (pi->link_cfg.link_ok != old_link ||
		    cxgbe_force_linkup(adapter))
			break;

		if (!wait_to_complete)
			break;

		rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
	}

	memset(&new_link, 0, sizeof(new_link));
	new_link.link_status = cxgbe_force_linkup(adapter) ?
			       RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
	new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);

	return rte_eth_linkstatus_set(eth_dev, &new_link);
}

/**
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	if (!s->fw_evtq.desc)
		return -ENOMEM;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already up, nothing to do */
	if (pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, true);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 1);
	return 0;
}

/**
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	if (!s->fw_evtq.desc)
		return -ENOMEM;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already down, nothing to do */
	if (!pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, false);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 0);
	return 0;
}

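/*
 * Change the MTU. The value handed to t4_set_rxmode() is the full frame
 * length, i.e. the requested MTU plus Ethernet header and CRC.
 */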
int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
}

/*
 * Close the device: release the port's queues and virtual interface, and
 * free adapter-wide resources once the last port under this PF is closed.
 */
int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 i;

	CXGBE_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!(adapter->flags & FULL_INIT_DONE))
		return 0;

	if (!pi->viid)
		return 0;

	cxgbe_down(pi);
	t4_sge_eth_release_queues(pi);
	t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
	pi->viid = 0;

	/* Free up the adapter-wide resources only after all the ports
	 * under this PF have been closed.
	 */
	for_each_port(adapter, i) {
		temp_pi = adap2pinfo(adapter, i);
		if (temp_pi->viid)
			return 0;
	}

	cxgbe_close(adapter);
	rte_free(adapter);

	return 0;
}

/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	CXGBE_FUNC_TRACE();

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;
	else
		eth_dev->data->scattered_rx = 0;

	cxgbe_enable_rx_queues(pi);

	err = cxgbe_setup_rss(pi);
	if (err)
		goto out;

	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	err = cxgbe_link_start(pi);
	if (err)
		goto out;

out:
	return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	uint16_t i;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return 0;

	cxgbe_down(pi);

	/*
	 *  We clear queues only if both tx and rx path of the port
	 *  have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
	eth_dev->data->scattered_rx = 0;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		eth_dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		eth_dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

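/*
 * Configure the port: request the RSS hash offload when RSS multiqueue is
 * enabled, set up the firmware event queue (and, on PF4, the control Tx
 * queue) once per adapter, and validate the requested queue counts.
 */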
int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int err;

	CXGBE_FUNC_TRACE();

	if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		eth_dev->data->dev_conf.rxmode.offloads |=
			RTE_ETH_RX_OFFLOAD_RSS_HASH;

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = cxgbe_setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
		if (is_pf4(adapter)) {
			err = cxgbe_setup_sge_ctrl_txq(adapter);
			if (err)
				return err;
		}
	}

	err = cxgbe_cfg_queue_count(eth_dev);
	if (err)
		return err;

	return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_start(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_stop(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

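/*
 * Set up a Tx queue: release any queue already bound to this index,
 * validate the descriptor count (falling back to the default when it is
 * too small) and allocate the hardware Tx queue via t4_sge_alloc_eth_txq().
 */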
int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	struct sge_eth_txq *txq;
	int err = 0;

	txq = &s->ethtxq[pi->first_txqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_txqset);

	/*  Free up the existing queue  */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev, queue_idx);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity checking
	 *
	 * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
	 * <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
		  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
	return err;
}

void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct sge_eth_txq *txq = eth_dev->data->tx_queues[qid];

	if (txq) {
		struct port_info *pi = (struct port_info *)
				       (txq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n",
			  __func__, pi->port_id, txq->q.cntxt_id);

		t4_sge_eth_txq_release(adap, txq);
	}
}

int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq;
	int ret;

	dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_start(adap, rxq);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq;
	int ret;

	dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_stop(adap, rxq);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

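/*
 * Set up an Rx queue: check that the configured MTU fits within the
 * device's Rx limits, release any queue already bound to this index,
 * validate the descriptor count and allocate the response queue and free
 * list via t4_sge_alloc_rxq().
 */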
int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf __rte_unused,
			     struct rte_mempool *mp)
{
	unsigned int pkt_len = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
		RTE_ETHER_CRC_LEN;
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	int err = 0, msi_idx = 0;
	struct sge_eth_rxq *rxq;

	rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	err = cxgbe_dev_info_get(eth_dev, &dev_info);
	if (err != 0) {
		dev_err(adap, "%s: error during getting ethernet device info",
			__func__);
		return err;
	}

	/* Must accommodate at least RTE_ETHER_MIN_MTU */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/*  Free up the existing queue  */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev, queue_idx);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity checking
	 *
	 * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
	 * <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -(EINVAL);
	}

	rxq->rspq.size = temp_nb_desc;
	rxq->fl.size = temp_nb_desc;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, NULL,
			       is_pf4(adapter) ?
			       t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
		  rxq->rspq.abs_id);
	return err;
}

void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct sge_eth_rxq *rxq = eth_dev->data->rx_queues[qid];

	if (rxq) {
		struct port_info *pi = (struct port_info *)
				       (rxq->rspq.eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
			  __func__, pi->port_id, rxq->rspq.cntxt_id);

		t4_sge_eth_rxq_release(adap, rxq);
	}
}

/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
				struct rte_eth_stats *eth_stats)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct port_stats ps;
	unsigned int i;

	cxgbe_stats_get(pi, &ps);

	/* RX Stats */
	eth_stats->imissed  = ps.rx_ovflow0 + ps.rx_ovflow1 +
			      ps.rx_ovflow2 + ps.rx_ovflow3 +
			      ps.rx_trunc0 + ps.rx_trunc1 +
			      ps.rx_trunc2 + ps.rx_trunc3;
	for (i = 0; i < NCHAN; i++)
		eth_stats->imissed += ps.rx_tp_tnl_cong_drops[i];

	eth_stats->ierrors  = ps.rx_symbol_err + ps.rx_fcs_err +
			      ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
			      ps.rx_len_err;

	/* TX Stats */
	eth_stats->opackets = ps.tx_frames;
	eth_stats->obytes   = ps.tx_octets;
	eth_stats->oerrors  = ps.tx_error_frames;

	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];

		eth_stats->ipackets += rxq->stats.pkts;
		eth_stats->ibytes += rxq->stats.rx_bytes;
	}

	return 0;
}

/*
 * Reset port statistics.
 */
static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int i;

	cxgbe_stats_reset(pi);
	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];

		memset(&rxq->stats, 0, sizeof(rxq->stats));
	}
	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + i];

		memset(&txq->stats, 0, sizeof(txq->stats));
	}

	return 0;
}

/* Store extended statistics names and their offsets in the stats structure */
struct cxgbe_dev_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct cxgbe_dev_xstats_name_off cxgbe_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct sge_eth_rx_stats, pkts)},
	{"bytes", offsetof(struct sge_eth_rx_stats, rx_bytes)},
	{"checksum_offloads", offsetof(struct sge_eth_rx_stats, rx_cso)},
	{"vlan_extractions", offsetof(struct sge_eth_rx_stats, vlan_ex)},
	{"dropped_packets", offsetof(struct sge_eth_rx_stats, rx_drops)},
};

static const struct cxgbe_dev_xstats_name_off cxgbe_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct sge_eth_tx_stats, pkts)},
	{"bytes", offsetof(struct sge_eth_tx_stats, tx_bytes)},
	{"tso_requests", offsetof(struct sge_eth_tx_stats, tso)},
	{"checksum_offloads", offsetof(struct sge_eth_tx_stats, tx_cso)},
	{"vlan_insertions", offsetof(struct sge_eth_tx_stats, vlan_ins)},
	{"packet_mapping_errors",
	 offsetof(struct sge_eth_tx_stats, mapping_err)},
	{"coalesced_wrs", offsetof(struct sge_eth_tx_stats, coal_wr)},
	{"coalesced_packets", offsetof(struct sge_eth_tx_stats, coal_pkts)},
};

static const struct cxgbe_dev_xstats_name_off cxgbe_dev_port_stats_strings[] = {
	{"tx_bytes", offsetof(struct port_stats, tx_octets)},
	{"tx_packets", offsetof(struct port_stats, tx_frames)},
	{"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
	{"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
	{"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
	{"tx_error_packets", offsetof(struct port_stats, tx_error_frames)},
	{"tx_size_64_packets", offsetof(struct port_stats, tx_frames_64)},
	{"tx_size_65_to_127_packets",
	 offsetof(struct port_stats, tx_frames_65_127)},
	{"tx_size_128_to_255_packets",
	 offsetof(struct port_stats, tx_frames_128_255)},
	{"tx_size_256_to_511_packets",
	 offsetof(struct port_stats, tx_frames_256_511)},
	{"tx_size_512_to_1023_packets",
	 offsetof(struct port_stats, tx_frames_512_1023)},
	{"tx_size_1024_to_1518_packets",
	 offsetof(struct port_stats, tx_frames_1024_1518)},
	{"tx_size_1519_to_max_packets",
	 offsetof(struct port_stats, tx_frames_1519_max)},
	{"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
	{"tx_pause_frames", offsetof(struct port_stats, tx_pause)},
	{"tx_ppp_pri0_packets", offsetof(struct port_stats, tx_ppp0)},
	{"tx_ppp_pri1_packets", offsetof(struct port_stats, tx_ppp1)},
	{"tx_ppp_pri2_packets", offsetof(struct port_stats, tx_ppp2)},
	{"tx_ppp_pri3_packets", offsetof(struct port_stats, tx_ppp3)},
	{"tx_ppp_pri4_packets", offsetof(struct port_stats, tx_ppp4)},
	{"tx_ppp_pri5_packets", offsetof(struct port_stats, tx_ppp5)},
	{"tx_ppp_pri6_packets", offsetof(struct port_stats, tx_ppp6)},
	{"tx_ppp_pri7_packets", offsetof(struct port_stats, tx_ppp7)},
	{"rx_bytes", offsetof(struct port_stats, rx_octets)},
	{"rx_packets", offsetof(struct port_stats, rx_frames)},
	{"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
	{"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
	{"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
	{"rx_too_long_packets", offsetof(struct port_stats, rx_too_long)},
	{"rx_jabber_packets", offsetof(struct port_stats, rx_jabber)},
	{"rx_fcs_error_packets", offsetof(struct port_stats, rx_fcs_err)},
	{"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
	{"rx_symbol_error_packets",
	 offsetof(struct port_stats, rx_symbol_err)},
	{"rx_short_packets", offsetof(struct port_stats, rx_runt)},
	{"rx_size_64_packets", offsetof(struct port_stats, rx_frames_64)},
	{"rx_size_65_to_127_packets",
	 offsetof(struct port_stats, rx_frames_65_127)},
	{"rx_size_128_to_255_packets",
	 offsetof(struct port_stats, rx_frames_128_255)},
	{"rx_size_256_to_511_packets",
	 offsetof(struct port_stats, rx_frames_256_511)},
	{"rx_size_512_to_1023_packets",
	 offsetof(struct port_stats, rx_frames_512_1023)},
	{"rx_size_1024_to_1518_packets",
	 offsetof(struct port_stats, rx_frames_1024_1518)},
	{"rx_size_1519_to_max_packets",
	 offsetof(struct port_stats, rx_frames_1519_max)},
	{"rx_pause_packets", offsetof(struct port_stats, rx_pause)},
	{"rx_ppp_pri0_packets", offsetof(struct port_stats, rx_ppp0)},
	{"rx_ppp_pri1_packets", offsetof(struct port_stats, rx_ppp1)},
	{"rx_ppp_pri2_packets", offsetof(struct port_stats, rx_ppp2)},
	{"rx_ppp_pri3_packets", offsetof(struct port_stats, rx_ppp3)},
	{"rx_ppp_pri4_packets", offsetof(struct port_stats, rx_ppp4)},
	{"rx_ppp_pri5_packets", offsetof(struct port_stats, rx_ppp5)},
	{"rx_ppp_pri6_packets", offsetof(struct port_stats, rx_ppp6)},
	{"rx_ppp_pri7_packets", offsetof(struct port_stats, rx_ppp7)},
	{"rx_bg0_dropped_packets", offsetof(struct port_stats, rx_ovflow0)},
	{"rx_bg1_dropped_packets", offsetof(struct port_stats, rx_ovflow1)},
	{"rx_bg2_dropped_packets", offsetof(struct port_stats, rx_ovflow2)},
	{"rx_bg3_dropped_packets", offsetof(struct port_stats, rx_ovflow3)},
	{"rx_bg0_truncated_packets", offsetof(struct port_stats, rx_trunc0)},
	{"rx_bg1_truncated_packets", offsetof(struct port_stats, rx_trunc1)},
	{"rx_bg2_truncated_packets", offsetof(struct port_stats, rx_trunc2)},
	{"rx_bg3_truncated_packets", offsetof(struct port_stats, rx_trunc3)},
	{"rx_tp_tnl_cong_drops0",
	 offsetof(struct port_stats, rx_tp_tnl_cong_drops[0])},
	{"rx_tp_tnl_cong_drops1",
	 offsetof(struct port_stats, rx_tp_tnl_cong_drops[1])},
	{"rx_tp_tnl_cong_drops2",
	 offsetof(struct port_stats, rx_tp_tnl_cong_drops[2])},
	{"rx_tp_tnl_cong_drops3",
	 offsetof(struct port_stats, rx_tp_tnl_cong_drops[3])},
};

static const struct cxgbe_dev_xstats_name_off
cxgbevf_dev_port_stats_strings[] = {
	{"tx_bytes", offsetof(struct port_stats, tx_octets)},
	{"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
	{"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
	{"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
	{"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
	{"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
	{"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
	{"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
	{"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
};

#define CXGBE_NB_RXQ_STATS RTE_DIM(cxgbe_dev_rxq_stats_strings)
#define CXGBE_NB_TXQ_STATS RTE_DIM(cxgbe_dev_txq_stats_strings)
#define CXGBE_NB_PORT_STATS RTE_DIM(cxgbe_dev_port_stats_strings)
#define CXGBEVF_NB_PORT_STATS RTE_DIM(cxgbevf_dev_port_stats_strings)

static u16 cxgbe_dev_xstats_count(struct port_info *pi)
{
	u16 count;

	count = (pi->n_tx_qsets * CXGBE_NB_TXQ_STATS) +
		(pi->n_rx_qsets * CXGBE_NB_RXQ_STATS);

	if (is_pf4(pi->adapter) != 0)
		count += CXGBE_NB_PORT_STATS;
	else
		count += CXGBEVF_NB_PORT_STATS;

	return count;
}

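/*
 * Common helper for the xstats callbacks: walks the per-port, per-txq and
 * per-rxq name/offset tables above and fills xstats_names and/or xstats
 * when they are provided. Always returns the total number of statistics,
 * which also serves as the "array too small" return value when size is
 * insufficient.
 */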
cxgbe_dev_xstats(struct rte_eth_dev * dev,struct rte_eth_xstat_name * xstats_names,struct rte_eth_xstat * xstats,unsigned int size)90718e44206SRahul Lakkireddy static int cxgbe_dev_xstats(struct rte_eth_dev *dev,
90818e44206SRahul Lakkireddy 			    struct rte_eth_xstat_name *xstats_names,
90918e44206SRahul Lakkireddy 			    struct rte_eth_xstat *xstats, unsigned int size)
91018e44206SRahul Lakkireddy {
91118e44206SRahul Lakkireddy 	const struct cxgbe_dev_xstats_name_off *xstats_str;
91218e44206SRahul Lakkireddy 	struct port_info *pi = dev->data->dev_private;
91318e44206SRahul Lakkireddy 	struct adapter *adap = pi->adapter;
91418e44206SRahul Lakkireddy 	struct sge *s = &adap->sge;
9155ec659a7SNikhil Vasoya 	u16 count, i, qid, nstats;
91618e44206SRahul Lakkireddy 	struct port_stats ps;
91718e44206SRahul Lakkireddy 	u64 *stats_ptr;
91818e44206SRahul Lakkireddy 
91918e44206SRahul Lakkireddy 	count = cxgbe_dev_xstats_count(pi);
92018e44206SRahul Lakkireddy 	if (size < count)
92118e44206SRahul Lakkireddy 		return count;
92218e44206SRahul Lakkireddy 
9235ec659a7SNikhil Vasoya 	if (is_pf4(adap) != 0) {
9245ec659a7SNikhil Vasoya 		/* port stats for PF*/
92518e44206SRahul Lakkireddy 		cxgbe_stats_get(pi, &ps);
9265ec659a7SNikhil Vasoya 		xstats_str = cxgbe_dev_port_stats_strings;
9275ec659a7SNikhil Vasoya 		nstats = CXGBE_NB_PORT_STATS;
9285ec659a7SNikhil Vasoya 	} else {
9295ec659a7SNikhil Vasoya 		/* port stats for VF*/
9305ec659a7SNikhil Vasoya 		cxgbevf_stats_get(pi, &ps);
9315ec659a7SNikhil Vasoya 		xstats_str = cxgbevf_dev_port_stats_strings;
9325ec659a7SNikhil Vasoya 		nstats = CXGBEVF_NB_PORT_STATS;
9335ec659a7SNikhil Vasoya 	}
93418e44206SRahul Lakkireddy 
93518e44206SRahul Lakkireddy 	count = 0;
9365ec659a7SNikhil Vasoya 	for (i = 0; i < nstats; i++, count++) {
93718e44206SRahul Lakkireddy 		if (xstats_names != NULL)
93818e44206SRahul Lakkireddy 			snprintf(xstats_names[count].name,
93918e44206SRahul Lakkireddy 				 sizeof(xstats_names[count].name),
94018e44206SRahul Lakkireddy 				 "%s", xstats_str[i].name);
94118e44206SRahul Lakkireddy 		if (xstats != NULL) {
94218e44206SRahul Lakkireddy 			stats_ptr = RTE_PTR_ADD(&ps,
94318e44206SRahul Lakkireddy 						xstats_str[i].offset);
94418e44206SRahul Lakkireddy 			xstats[count].value = *stats_ptr;
94518e44206SRahul Lakkireddy 			xstats[count].id = count;
94618e44206SRahul Lakkireddy 		}
94718e44206SRahul Lakkireddy 	}
94818e44206SRahul Lakkireddy 
94918e44206SRahul Lakkireddy 	/* per-txq stats */
95018e44206SRahul Lakkireddy 	xstats_str = cxgbe_dev_txq_stats_strings;
95118e44206SRahul Lakkireddy 	for (qid = 0; qid < pi->n_tx_qsets; qid++) {
95218e44206SRahul Lakkireddy 		struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + qid];
95318e44206SRahul Lakkireddy 
95418e44206SRahul Lakkireddy 		for (i = 0; i < CXGBE_NB_TXQ_STATS; i++, count++) {
95518e44206SRahul Lakkireddy 			if (xstats_names != NULL)
95618e44206SRahul Lakkireddy 				snprintf(xstats_names[count].name,
95718e44206SRahul Lakkireddy 					 sizeof(xstats_names[count].name),
95818e44206SRahul Lakkireddy 					 "tx_q%u_%s",
95918e44206SRahul Lakkireddy 					 qid, xstats_str[i].name);
96018e44206SRahul Lakkireddy 			if (xstats != NULL) {
96118e44206SRahul Lakkireddy 				stats_ptr = RTE_PTR_ADD(&txq->stats,
96218e44206SRahul Lakkireddy 							xstats_str[i].offset);
96318e44206SRahul Lakkireddy 				xstats[count].value = *stats_ptr;
96418e44206SRahul Lakkireddy 				xstats[count].id = count;
96518e44206SRahul Lakkireddy 			}
96618e44206SRahul Lakkireddy 		}
96718e44206SRahul Lakkireddy 	}
96818e44206SRahul Lakkireddy 
96918e44206SRahul Lakkireddy 	/* per-rxq stats */
97018e44206SRahul Lakkireddy 	xstats_str = cxgbe_dev_rxq_stats_strings;
97118e44206SRahul Lakkireddy 	for (qid = 0; qid < pi->n_rx_qsets; qid++) {
97218e44206SRahul Lakkireddy 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + qid];
97318e44206SRahul Lakkireddy 
97418e44206SRahul Lakkireddy 		for (i = 0; i < CXGBE_NB_RXQ_STATS; i++, count++) {
97518e44206SRahul Lakkireddy 			if (xstats_names != NULL)
97618e44206SRahul Lakkireddy 				snprintf(xstats_names[count].name,
97718e44206SRahul Lakkireddy 					 sizeof(xstats_names[count].name),
97818e44206SRahul Lakkireddy 					 "rx_q%u_%s",
97918e44206SRahul Lakkireddy 					 qid, xstats_str[i].name);
98018e44206SRahul Lakkireddy 			if (xstats != NULL) {
98118e44206SRahul Lakkireddy 				stats_ptr = RTE_PTR_ADD(&rxq->stats,
98218e44206SRahul Lakkireddy 							xstats_str[i].offset);
98318e44206SRahul Lakkireddy 				xstats[count].value = *stats_ptr;
98418e44206SRahul Lakkireddy 				xstats[count].id = count;
98518e44206SRahul Lakkireddy 			}
98618e44206SRahul Lakkireddy 		}
98718e44206SRahul Lakkireddy 	}
98818e44206SRahul Lakkireddy 
98918e44206SRahul Lakkireddy 	return count;
99018e44206SRahul Lakkireddy }
99118e44206SRahul Lakkireddy 
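/*
 * Editor's illustrative sketch (not part of the driver): how an application
 * is expected to consume the xstats callbacks above through the ethdev API.
 * The helper name my_dump_cxgbe_xstats() and the stdio/stdlib/inttypes usage
 * are assumptions for the example only; error handling is trimmed.
 *
 *	static void my_dump_cxgbe_xstats(uint16_t port_id)
 *	{
 *		int n = rte_eth_xstats_get(port_id, NULL, 0);
 *		struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
 *		struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *		int i;
 *
 *		rte_eth_xstats_get_names(port_id, names, n);
 *		n = rte_eth_xstats_get(port_id, xs, n);
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n",
 *			       names[xs[i].id].name, xs[i].value);
 *		free(names);
 *		free(xs);
 *	}
 *
 * The first rte_eth_xstats_get() call passes n == 0, so cxgbe_dev_xstats()
 * takes its "size < count" path and only reports how many entries exist.
 */
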
99218e44206SRahul Lakkireddy /* Get port extended statistics by ID. */
9935ec659a7SNikhil Vasoya int cxgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
99418e44206SRahul Lakkireddy 			       const uint64_t *ids, uint64_t *values,
99518e44206SRahul Lakkireddy 			       unsigned int n)
99618e44206SRahul Lakkireddy {
99718e44206SRahul Lakkireddy 	struct port_info *pi = dev->data->dev_private;
99818e44206SRahul Lakkireddy 	struct rte_eth_xstat *xstats_copy;
99918e44206SRahul Lakkireddy 	u16 count, i;
100018e44206SRahul Lakkireddy 	int ret = 0;
100118e44206SRahul Lakkireddy 
100218e44206SRahul Lakkireddy 	count = cxgbe_dev_xstats_count(pi);
100318e44206SRahul Lakkireddy 	if (ids == NULL || values == NULL)
100418e44206SRahul Lakkireddy 		return count;
100518e44206SRahul Lakkireddy 
100618e44206SRahul Lakkireddy 	xstats_copy = rte_calloc(NULL, count, sizeof(*xstats_copy), 0);
100718e44206SRahul Lakkireddy 	if (xstats_copy == NULL)
100818e44206SRahul Lakkireddy 		return -ENOMEM;
100918e44206SRahul Lakkireddy 
101018e44206SRahul Lakkireddy 	cxgbe_dev_xstats(dev, NULL, xstats_copy, count);
101118e44206SRahul Lakkireddy 
101218e44206SRahul Lakkireddy 	for (i = 0; i < n; i++) {
101318e44206SRahul Lakkireddy 		if (ids[i] >= count) {
101418e44206SRahul Lakkireddy 			ret = -EINVAL;
101518e44206SRahul Lakkireddy 			goto out_err;
101618e44206SRahul Lakkireddy 		}
101718e44206SRahul Lakkireddy 		values[i] = xstats_copy[ids[i]].value;
101818e44206SRahul Lakkireddy 	}
101918e44206SRahul Lakkireddy 
102018e44206SRahul Lakkireddy 	ret = n;
102118e44206SRahul Lakkireddy 
102218e44206SRahul Lakkireddy out_err:
102318e44206SRahul Lakkireddy 	rte_free(xstats_copy);
102418e44206SRahul Lakkireddy 	return ret;
102518e44206SRahul Lakkireddy }
102618e44206SRahul Lakkireddy 
102718e44206SRahul Lakkireddy /* Get names of port extended statistics by ID. */
10285ec659a7SNikhil Vasoya int cxgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
10298c9f976fSAndrew Rybchenko 					    const uint64_t *ids,
103018e44206SRahul Lakkireddy 					    struct rte_eth_xstat_name *xnames,
10318c9f976fSAndrew Rybchenko 					    unsigned int n)
103218e44206SRahul Lakkireddy {
103318e44206SRahul Lakkireddy 	struct port_info *pi = dev->data->dev_private;
103418e44206SRahul Lakkireddy 	struct rte_eth_xstat_name *xnames_copy;
103518e44206SRahul Lakkireddy 	u16 count, i;
103618e44206SRahul Lakkireddy 	int ret = 0;
103718e44206SRahul Lakkireddy 
103818e44206SRahul Lakkireddy 	count = cxgbe_dev_xstats_count(pi);
103918e44206SRahul Lakkireddy 	if (ids == NULL || xnames == NULL)
104018e44206SRahul Lakkireddy 		return count;
104118e44206SRahul Lakkireddy 
104218e44206SRahul Lakkireddy 	xnames_copy = rte_calloc(NULL, count, sizeof(*xnames_copy), 0);
104318e44206SRahul Lakkireddy 	if (xnames_copy == NULL)
104418e44206SRahul Lakkireddy 		return -ENOMEM;
104518e44206SRahul Lakkireddy 
104618e44206SRahul Lakkireddy 	cxgbe_dev_xstats(dev, xnames_copy, NULL, count);
104718e44206SRahul Lakkireddy 
104818e44206SRahul Lakkireddy 	for (i = 0; i < n; i++) {
104918e44206SRahul Lakkireddy 		if (ids[i] >= count) {
105018e44206SRahul Lakkireddy 			ret = -EINVAL;
105118e44206SRahul Lakkireddy 			goto out_err;
105218e44206SRahul Lakkireddy 		}
105318e44206SRahul Lakkireddy 		rte_strlcpy(xnames[i].name, xnames_copy[ids[i]].name,
105418e44206SRahul Lakkireddy 			    sizeof(xnames[i].name));
105518e44206SRahul Lakkireddy 	}
105618e44206SRahul Lakkireddy 
105718e44206SRahul Lakkireddy 	ret = n;
105818e44206SRahul Lakkireddy 
105918e44206SRahul Lakkireddy out_err:
106018e44206SRahul Lakkireddy 	rte_free(xnames_copy);
106118e44206SRahul Lakkireddy 	return ret;
106218e44206SRahul Lakkireddy }
106318e44206SRahul Lakkireddy 
106418e44206SRahul Lakkireddy /* Get port extended statistics. */
10655ec659a7SNikhil Vasoya int cxgbe_dev_xstats_get(struct rte_eth_dev *dev,
106618e44206SRahul Lakkireddy 			 struct rte_eth_xstat *xstats, unsigned int n)
106718e44206SRahul Lakkireddy {
106818e44206SRahul Lakkireddy 	return cxgbe_dev_xstats(dev, NULL, xstats, n);
106918e44206SRahul Lakkireddy }
107018e44206SRahul Lakkireddy 
107118e44206SRahul Lakkireddy /* Get names of port extended statistics. */
10725ec659a7SNikhil Vasoya int cxgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
107318e44206SRahul Lakkireddy 			       struct rte_eth_xstat_name *xstats_names,
107418e44206SRahul Lakkireddy 			       unsigned int n)
107518e44206SRahul Lakkireddy {
107618e44206SRahul Lakkireddy 	return cxgbe_dev_xstats(dev, xstats_names, NULL, n);
107718e44206SRahul Lakkireddy }
107818e44206SRahul Lakkireddy 
107918e44206SRahul Lakkireddy /* Reset port extended statistics. */
108018e44206SRahul Lakkireddy static int cxgbe_dev_xstats_reset(struct rte_eth_dev *dev)
108118e44206SRahul Lakkireddy {
108218e44206SRahul Lakkireddy 	return cxgbe_dev_stats_reset(dev);
108318e44206SRahul Lakkireddy }
108418e44206SRahul Lakkireddy 
1085631dfc71SRahul Lakkireddy static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1086631dfc71SRahul Lakkireddy 			       struct rte_eth_fc_conf *fc_conf)
1087631dfc71SRahul Lakkireddy {
108863a97e58SStephen Hemminger 	struct port_info *pi = eth_dev->data->dev_private;
1089631dfc71SRahul Lakkireddy 	struct link_config *lc = &pi->link_cfg;
1090a83041b1SKarra Satwik 	u8 rx_pause = 0, tx_pause = 0;
1091a83041b1SKarra Satwik 	u32 caps = lc->link_caps;
1092631dfc71SRahul Lakkireddy 
1093a83041b1SKarra Satwik 	if (caps & FW_PORT_CAP32_ANEG)
1094a83041b1SKarra Satwik 		fc_conf->autoneg = 1;
1095a83041b1SKarra Satwik 
1096a83041b1SKarra Satwik 	if (caps & FW_PORT_CAP32_FC_TX)
1097a83041b1SKarra Satwik 		tx_pause = 1;
1098a83041b1SKarra Satwik 
1099a83041b1SKarra Satwik 	if (caps & FW_PORT_CAP32_FC_RX)
1100a83041b1SKarra Satwik 		rx_pause = 1;
1101631dfc71SRahul Lakkireddy 
1102631dfc71SRahul Lakkireddy 	if (rx_pause && tx_pause)
1103295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_FULL;
1104631dfc71SRahul Lakkireddy 	else if (rx_pause)
1105295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
1106631dfc71SRahul Lakkireddy 	else if (tx_pause)
1107295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
1108631dfc71SRahul Lakkireddy 	else
1109295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_NONE;
1110631dfc71SRahul Lakkireddy 	return 0;
1111631dfc71SRahul Lakkireddy }
1112631dfc71SRahul Lakkireddy 
1113631dfc71SRahul Lakkireddy static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1114631dfc71SRahul Lakkireddy 			       struct rte_eth_fc_conf *fc_conf)
1115631dfc71SRahul Lakkireddy {
111663a97e58SStephen Hemminger 	struct port_info *pi = eth_dev->data->dev_private;
1117631dfc71SRahul Lakkireddy 	struct link_config *lc = &pi->link_cfg;
1118a83041b1SKarra Satwik 	u32 new_caps = lc->admin_caps;
1119a83041b1SKarra Satwik 	u8 tx_pause = 0, rx_pause = 0;
1120a83041b1SKarra Satwik 	int ret;
1121631dfc71SRahul Lakkireddy 
1122295968d1SFerruh Yigit 	if (fc_conf->mode == RTE_ETH_FC_FULL) {
1123a83041b1SKarra Satwik 		tx_pause = 1;
1124a83041b1SKarra Satwik 		rx_pause = 1;
1125295968d1SFerruh Yigit 	} else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) {
1126a83041b1SKarra Satwik 		tx_pause = 1;
1127295968d1SFerruh Yigit 	} else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) {
1128a83041b1SKarra Satwik 		rx_pause = 1;
1129631dfc71SRahul Lakkireddy 	}
1130631dfc71SRahul Lakkireddy 
1131a83041b1SKarra Satwik 	ret = t4_set_link_pause(pi, fc_conf->autoneg, tx_pause,
1132a83041b1SKarra Satwik 				rx_pause, &new_caps);
1133a83041b1SKarra Satwik 	if (ret != 0)
1134a83041b1SKarra Satwik 		return ret;
1135631dfc71SRahul Lakkireddy 
1136a83041b1SKarra Satwik 	if (!fc_conf->autoneg) {
1137a83041b1SKarra Satwik 		if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
1138a83041b1SKarra Satwik 			new_caps |= FW_PORT_CAP32_FORCE_PAUSE;
1139a83041b1SKarra Satwik 	} else {
1140a83041b1SKarra Satwik 		new_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
1141a83041b1SKarra Satwik 	}
1142631dfc71SRahul Lakkireddy 
1143a83041b1SKarra Satwik 	if (new_caps != lc->admin_caps) {
1144a83041b1SKarra Satwik 		ret = t4_link_l1cfg(pi, new_caps);
1145a83041b1SKarra Satwik 		if (ret == 0)
1146a83041b1SKarra Satwik 			lc->admin_caps = new_caps;
1147a83041b1SKarra Satwik 	}
1148a83041b1SKarra Satwik 
1149a83041b1SKarra Satwik 	return ret;
1150631dfc71SRahul Lakkireddy }
1151631dfc71SRahul Lakkireddy 
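/*
 * Editor's illustrative sketch (not part of the driver): enabling full
 * pause-frame support from an application, which lands in
 * cxgbe_flow_ctrl_get()/cxgbe_flow_ctrl_set() above. Error handling is
 * omitted and port_id is assumed to be a valid CXGBE port.
 *
 *	struct rte_eth_fc_conf fc;
 *
 *	rte_eth_dev_flow_ctrl_get(port_id, &fc);
 *	fc.mode = RTE_ETH_FC_FULL;	// request both RX and TX pause
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *
 * Reusing the fc_conf returned by the get() call preserves the autoneg
 * setting, which the set() path uses to decide whether to force pause
 * (FW_PORT_CAP32_FORCE_PAUSE) or renegotiate the link.
 */
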
1152011ebc23SKumar Sanghvi const uint32_t *
1153ba6a168aSSivaramakrishnan Venkat cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev,
1154ba6a168aSSivaramakrishnan Venkat 			       size_t *no_of_elements)
115578a38edfSJianfeng Tan {
115678a38edfSJianfeng Tan 	static const uint32_t ptypes[] = {
115778a38edfSJianfeng Tan 		RTE_PTYPE_L3_IPV4,
115878a38edfSJianfeng Tan 		RTE_PTYPE_L3_IPV6,
115978a38edfSJianfeng Tan 	};
116078a38edfSJianfeng Tan 
1161ba6a168aSSivaramakrishnan Venkat 	if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts) {
1162ba6a168aSSivaramakrishnan Venkat 		*no_of_elements = RTE_DIM(ptypes);
116378a38edfSJianfeng Tan 		return ptypes;
1164ba6a168aSSivaramakrishnan Venkat 	}
116578a38edfSJianfeng Tan 	return NULL;
116678a38edfSJianfeng Tan }
116778a38edfSJianfeng Tan 
116808e21af9SKumar Sanghvi /* Update RSS hash configuration
116908e21af9SKumar Sanghvi  */
117008e21af9SKumar Sanghvi static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
117108e21af9SKumar Sanghvi 				     struct rte_eth_rss_conf *rss_conf)
117208e21af9SKumar Sanghvi {
117363a97e58SStephen Hemminger 	struct port_info *pi = dev->data->dev_private;
117408e21af9SKumar Sanghvi 	struct adapter *adapter = pi->adapter;
117508e21af9SKumar Sanghvi 	int err;
117608e21af9SKumar Sanghvi 
117708e21af9SKumar Sanghvi 	err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
117808e21af9SKumar Sanghvi 	if (err)
117908e21af9SKumar Sanghvi 		return err;
118008e21af9SKumar Sanghvi 
118108e21af9SKumar Sanghvi 	pi->rss_hf = rss_conf->rss_hf;
118208e21af9SKumar Sanghvi 
118308e21af9SKumar Sanghvi 	if (rss_conf->rss_key) {
118408e21af9SKumar Sanghvi 		u32 key[10], mod_key[10];
118508e21af9SKumar Sanghvi 		int i, j;
118608e21af9SKumar Sanghvi 
118708e21af9SKumar Sanghvi 		memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
118808e21af9SKumar Sanghvi 
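		/*
		 * Reverse the order of the ten 32-bit key words and convert
		 * each to big-endian before programming the 40-byte RSS key
		 * via t4_write_rss_key().
		 */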
118908e21af9SKumar Sanghvi 		for (i = 9, j = 0; i >= 0; i--, j++)
119008e21af9SKumar Sanghvi 			mod_key[j] = cpu_to_be32(key[i]);
119108e21af9SKumar Sanghvi 
119208e21af9SKumar Sanghvi 		t4_write_rss_key(adapter, mod_key, -1);
119308e21af9SKumar Sanghvi 	}
119408e21af9SKumar Sanghvi 
119508e21af9SKumar Sanghvi 	return 0;
119608e21af9SKumar Sanghvi }
119708e21af9SKumar Sanghvi 
119876aba8d7SKumar Sanghvi /* Get RSS hash configuration
119976aba8d7SKumar Sanghvi  */
120076aba8d7SKumar Sanghvi static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
120176aba8d7SKumar Sanghvi 				       struct rte_eth_rss_conf *rss_conf)
120276aba8d7SKumar Sanghvi {
120363a97e58SStephen Hemminger 	struct port_info *pi = dev->data->dev_private;
120476aba8d7SKumar Sanghvi 	struct adapter *adapter = pi->adapter;
120576aba8d7SKumar Sanghvi 	u64 rss_hf = 0;
120676aba8d7SKumar Sanghvi 	u64 flags = 0;
120776aba8d7SKumar Sanghvi 	int err;
120876aba8d7SKumar Sanghvi 
120976aba8d7SKumar Sanghvi 	err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
121076aba8d7SKumar Sanghvi 				    &flags, NULL);
121176aba8d7SKumar Sanghvi 
121276aba8d7SKumar Sanghvi 	if (err)
121376aba8d7SKumar Sanghvi 		return err;
121476aba8d7SKumar Sanghvi 
121576aba8d7SKumar Sanghvi 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
1216d97aa415SRahul Lakkireddy 		rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
121776aba8d7SKumar Sanghvi 		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
1218d97aa415SRahul Lakkireddy 			rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
121976aba8d7SKumar Sanghvi 	}
122076aba8d7SKumar Sanghvi 
122176aba8d7SKumar Sanghvi 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1222d97aa415SRahul Lakkireddy 		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
122376aba8d7SKumar Sanghvi 
122476aba8d7SKumar Sanghvi 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
1225295968d1SFerruh Yigit 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
122676aba8d7SKumar Sanghvi 		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
1227295968d1SFerruh Yigit 			rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
122876aba8d7SKumar Sanghvi 	}
122976aba8d7SKumar Sanghvi 
123076aba8d7SKumar Sanghvi 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1231d97aa415SRahul Lakkireddy 		rss_hf |= CXGBE_RSS_HF_IPV4_MASK;
123276aba8d7SKumar Sanghvi 
123376aba8d7SKumar Sanghvi 	rss_conf->rss_hf = rss_hf;
123476aba8d7SKumar Sanghvi 
123576aba8d7SKumar Sanghvi 	if (rss_conf->rss_key) {
123676aba8d7SKumar Sanghvi 		u32 key[10], mod_key[10];
123776aba8d7SKumar Sanghvi 		int i, j;
123876aba8d7SKumar Sanghvi 
123976aba8d7SKumar Sanghvi 		t4_read_rss_key(adapter, key);
124076aba8d7SKumar Sanghvi 
124176aba8d7SKumar Sanghvi 		for (i = 9, j = 0; i >= 0; i--, j++)
124276aba8d7SKumar Sanghvi 			mod_key[j] = be32_to_cpu(key[i]);
124376aba8d7SKumar Sanghvi 
124476aba8d7SKumar Sanghvi 		memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
124576aba8d7SKumar Sanghvi 	}
124676aba8d7SKumar Sanghvi 
124776aba8d7SKumar Sanghvi 	return 0;
124876aba8d7SKumar Sanghvi }
124976aba8d7SKumar Sanghvi 
1250f2d344dfSRahul Lakkireddy static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
1251f2d344dfSRahul Lakkireddy 				     struct rte_eth_rss_reta_entry64 *reta_conf,
1252f2d344dfSRahul Lakkireddy 				     uint16_t reta_size)
1253f2d344dfSRahul Lakkireddy {
1254f2d344dfSRahul Lakkireddy 	struct port_info *pi = dev->data->dev_private;
1255f2d344dfSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
1256f2d344dfSRahul Lakkireddy 	u16 i, idx, shift, *rss;
1257f2d344dfSRahul Lakkireddy 	int ret;
1258f2d344dfSRahul Lakkireddy 
1259f2d344dfSRahul Lakkireddy 	if (!(adapter->flags & FULL_INIT_DONE))
1260f2d344dfSRahul Lakkireddy 		return -ENOMEM;
1261f2d344dfSRahul Lakkireddy 
1262f2d344dfSRahul Lakkireddy 	if (!reta_size || reta_size > pi->rss_size)
1263f2d344dfSRahul Lakkireddy 		return -EINVAL;
1264f2d344dfSRahul Lakkireddy 
1265f2d344dfSRahul Lakkireddy 	rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
1266f2d344dfSRahul Lakkireddy 	if (!rss)
1267f2d344dfSRahul Lakkireddy 		return -ENOMEM;
1268f2d344dfSRahul Lakkireddy 
1269f2d344dfSRahul Lakkireddy 	rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
1270f2d344dfSRahul Lakkireddy 	for (i = 0; i < reta_size; i++) {
1271295968d1SFerruh Yigit 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1272295968d1SFerruh Yigit 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1273f2d344dfSRahul Lakkireddy 		if (!(reta_conf[idx].mask & (1ULL << shift)))
1274f2d344dfSRahul Lakkireddy 			continue;
1275f2d344dfSRahul Lakkireddy 
1276f2d344dfSRahul Lakkireddy 		rss[i] = reta_conf[idx].reta[shift];
1277f2d344dfSRahul Lakkireddy 	}
1278f2d344dfSRahul Lakkireddy 
1279f2d344dfSRahul Lakkireddy 	ret = cxgbe_write_rss(pi, rss);
1280f2d344dfSRahul Lakkireddy 	if (!ret)
1281f2d344dfSRahul Lakkireddy 		rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));
1282f2d344dfSRahul Lakkireddy 
1283f2d344dfSRahul Lakkireddy 	rte_free(rss);
1284f2d344dfSRahul Lakkireddy 	return ret;
1285f2d344dfSRahul Lakkireddy }
1286f2d344dfSRahul Lakkireddy 
1287f2d344dfSRahul Lakkireddy static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
1288f2d344dfSRahul Lakkireddy 				    struct rte_eth_rss_reta_entry64 *reta_conf,
1289f2d344dfSRahul Lakkireddy 				    uint16_t reta_size)
1290f2d344dfSRahul Lakkireddy {
1291f2d344dfSRahul Lakkireddy 	struct port_info *pi = dev->data->dev_private;
1292f2d344dfSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
1293f2d344dfSRahul Lakkireddy 	u16 i, idx, shift;
1294f2d344dfSRahul Lakkireddy 
1295f2d344dfSRahul Lakkireddy 	if (!(adapter->flags & FULL_INIT_DONE))
1296f2d344dfSRahul Lakkireddy 		return -ENOMEM;
1297f2d344dfSRahul Lakkireddy 
1298f2d344dfSRahul Lakkireddy 	if (!reta_size || reta_size > pi->rss_size)
1299f2d344dfSRahul Lakkireddy 		return -EINVAL;
1300f2d344dfSRahul Lakkireddy 
1301f2d344dfSRahul Lakkireddy 	for (i = 0; i < reta_size; i++) {
1302295968d1SFerruh Yigit 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1303295968d1SFerruh Yigit 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1304f2d344dfSRahul Lakkireddy 		if (!(reta_conf[idx].mask & (1ULL << shift)))
1305f2d344dfSRahul Lakkireddy 			continue;
1306f2d344dfSRahul Lakkireddy 
1307f2d344dfSRahul Lakkireddy 		reta_conf[idx].reta[shift] = pi->rss[i];
1308f2d344dfSRahul Lakkireddy 	}
1309f2d344dfSRahul Lakkireddy 
1310f2d344dfSRahul Lakkireddy 	return 0;
1311f2d344dfSRahul Lakkireddy }
1312f2d344dfSRahul Lakkireddy 
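/*
 * Editor's illustrative sketch (not part of the driver): programming the RSS
 * redirection table via the reta_update callback above. reta_size should be
 * taken from rte_eth_dev_info_get() (the driver rejects anything larger than
 * pi->rss_size) and is assumed here to be a multiple of
 * RTE_ETH_RETA_GROUP_SIZE; nb_rxq stands for the number of configured Rx
 * queues. Error handling is omitted.
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t i;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *
 *	struct rte_eth_rss_reta_entry64 reta[info.reta_size /
 *					     RTE_ETH_RETA_GROUP_SIZE];
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < info.reta_size; i++) {
 *		reta[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
 *		reta[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 *			i % nb_rxq;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
 */
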
1313fe0bd9eeSRahul Lakkireddy static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
1314fe0bd9eeSRahul Lakkireddy {
1315fe0bd9eeSRahul Lakkireddy 	RTE_SET_USED(dev);
1316fe0bd9eeSRahul Lakkireddy 	return EEPROMSIZE;
1317fe0bd9eeSRahul Lakkireddy }
1318fe0bd9eeSRahul Lakkireddy 
1319fe0bd9eeSRahul Lakkireddy /**
1320fe0bd9eeSRahul Lakkireddy  * eeprom_ptov - translate a physical EEPROM address to virtual
1321fe0bd9eeSRahul Lakkireddy  * @phys_addr: the physical EEPROM address
1322fe0bd9eeSRahul Lakkireddy  * @fn: the PCI function number
1323fe0bd9eeSRahul Lakkireddy  * @sz: size of function-specific area
1324fe0bd9eeSRahul Lakkireddy  *
1325fe0bd9eeSRahul Lakkireddy  * Translate a physical EEPROM address to virtual.  The first 1K is
1326fe0bd9eeSRahul Lakkireddy  * accessed through virtual addresses starting at 31K, the rest is
1327fe0bd9eeSRahul Lakkireddy  * accessed through virtual addresses starting at 0.
1328fe0bd9eeSRahul Lakkireddy  *
1329fe0bd9eeSRahul Lakkireddy  * The mapping is as follows:
1330fe0bd9eeSRahul Lakkireddy  * [0..1K) -> [31K..32K)
1331fe0bd9eeSRahul Lakkireddy  * [1K..1K+A) -> [31K-A..31K)
1332fe0bd9eeSRahul Lakkireddy  * [1K+A..ES) -> [0..ES-A-1K)
1333fe0bd9eeSRahul Lakkireddy  *
1334fe0bd9eeSRahul Lakkireddy  * where A = @fn * @sz, and ES = EEPROM size.
1335fe0bd9eeSRahul Lakkireddy  */
1336fe0bd9eeSRahul Lakkireddy static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1337fe0bd9eeSRahul Lakkireddy {
1338fe0bd9eeSRahul Lakkireddy 	fn *= sz;
1339fe0bd9eeSRahul Lakkireddy 	if (phys_addr < 1024)
1340fe0bd9eeSRahul Lakkireddy 		return phys_addr + (31 << 10);
1341fe0bd9eeSRahul Lakkireddy 	if (phys_addr < 1024 + fn)
1342fe0bd9eeSRahul Lakkireddy 		return fn + phys_addr - 1024;
1343fe0bd9eeSRahul Lakkireddy 	if (phys_addr < EEPROMSIZE)
1344fe0bd9eeSRahul Lakkireddy 		return phys_addr - 1024 - fn;
1345fe0bd9eeSRahul Lakkireddy 	if (phys_addr < EEPROMVSIZE)
1346fe0bd9eeSRahul Lakkireddy 		return phys_addr - 1024;
1347fe0bd9eeSRahul Lakkireddy 	return -EINVAL;
1348fe0bd9eeSRahul Lakkireddy }
1349fe0bd9eeSRahul Lakkireddy 
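/*
 * Worked example, traced directly from eeprom_ptov() above with fn = 2,
 * so A = 2 * sz:
 *
 *	eeprom_ptov(0x100, 2, sz)         ->  0x100 + 31744  (first 1K -> 31K..32K)
 *	eeprom_ptov(1024 + A + 16, 2, sz) ->  16             (flat region -> 0..)
 *	eeprom_ptov(EEPROMVSIZE, 2, sz)   ->  -EINVAL        (beyond the virtual space)
 */
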
1350fe0bd9eeSRahul Lakkireddy /* The next two routines implement eeprom read/write from physical addresses.
1351fe0bd9eeSRahul Lakkireddy  */
1352fe0bd9eeSRahul Lakkireddy static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1353fe0bd9eeSRahul Lakkireddy {
1354fe0bd9eeSRahul Lakkireddy 	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1355fe0bd9eeSRahul Lakkireddy 
1356fe0bd9eeSRahul Lakkireddy 	if (vaddr >= 0)
1357fe0bd9eeSRahul Lakkireddy 		vaddr = t4_seeprom_read(adap, vaddr, v);
1358fe0bd9eeSRahul Lakkireddy 	return vaddr < 0 ? vaddr : 0;
1359fe0bd9eeSRahul Lakkireddy }
1360fe0bd9eeSRahul Lakkireddy 
1361fe0bd9eeSRahul Lakkireddy static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1362fe0bd9eeSRahul Lakkireddy {
1363fe0bd9eeSRahul Lakkireddy 	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1364fe0bd9eeSRahul Lakkireddy 
1365fe0bd9eeSRahul Lakkireddy 	if (vaddr >= 0)
1366fe0bd9eeSRahul Lakkireddy 		vaddr = t4_seeprom_write(adap, vaddr, v);
1367fe0bd9eeSRahul Lakkireddy 	return vaddr < 0 ? vaddr : 0;
1368fe0bd9eeSRahul Lakkireddy }
1369fe0bd9eeSRahul Lakkireddy 
1370fe0bd9eeSRahul Lakkireddy #define EEPROM_MAGIC 0x38E2F10C
1371fe0bd9eeSRahul Lakkireddy 
1372fe0bd9eeSRahul Lakkireddy static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
1373fe0bd9eeSRahul Lakkireddy 			    struct rte_dev_eeprom_info *e)
1374fe0bd9eeSRahul Lakkireddy {
137563a97e58SStephen Hemminger 	struct port_info *pi = dev->data->dev_private;
1376fe0bd9eeSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
1377fe0bd9eeSRahul Lakkireddy 	u32 i, err = 0;
1378fe0bd9eeSRahul Lakkireddy 	u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);
1379fe0bd9eeSRahul Lakkireddy 
1380fe0bd9eeSRahul Lakkireddy 	if (!buf)
1381fe0bd9eeSRahul Lakkireddy 		return -ENOMEM;
1382fe0bd9eeSRahul Lakkireddy 
1383fe0bd9eeSRahul Lakkireddy 	e->magic = EEPROM_MAGIC;
1384fe0bd9eeSRahul Lakkireddy 	for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
1385fe0bd9eeSRahul Lakkireddy 		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1386fe0bd9eeSRahul Lakkireddy 
1387fe0bd9eeSRahul Lakkireddy 	if (!err)
1388fe0bd9eeSRahul Lakkireddy 		rte_memcpy(e->data, buf + e->offset, e->length);
1389fe0bd9eeSRahul Lakkireddy 	rte_free(buf);
1390fe0bd9eeSRahul Lakkireddy 	return err;
1391fe0bd9eeSRahul Lakkireddy }
1392fe0bd9eeSRahul Lakkireddy 
1393fe0bd9eeSRahul Lakkireddy static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
1394fe0bd9eeSRahul Lakkireddy 			    struct rte_dev_eeprom_info *eeprom)
1395fe0bd9eeSRahul Lakkireddy {
139663a97e58SStephen Hemminger 	struct port_info *pi = dev->data->dev_private;
1397fe0bd9eeSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
1398fe0bd9eeSRahul Lakkireddy 	u8 *buf;
1399fe0bd9eeSRahul Lakkireddy 	int err = 0;
1400fe0bd9eeSRahul Lakkireddy 	u32 aligned_offset, aligned_len, *p;
1401fe0bd9eeSRahul Lakkireddy 
1402fe0bd9eeSRahul Lakkireddy 	if (eeprom->magic != EEPROM_MAGIC)
1403fe0bd9eeSRahul Lakkireddy 		return -EINVAL;
1404fe0bd9eeSRahul Lakkireddy 
1405fe0bd9eeSRahul Lakkireddy 	aligned_offset = eeprom->offset & ~3;
1406fe0bd9eeSRahul Lakkireddy 	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;
1407fe0bd9eeSRahul Lakkireddy 
1408fe0bd9eeSRahul Lakkireddy 	if (adapter->pf > 0) {
1409fe0bd9eeSRahul Lakkireddy 		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
1410fe0bd9eeSRahul Lakkireddy 
1411fe0bd9eeSRahul Lakkireddy 		if (aligned_offset < start ||
1412fe0bd9eeSRahul Lakkireddy 		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
1413fe0bd9eeSRahul Lakkireddy 			return -EPERM;
1414fe0bd9eeSRahul Lakkireddy 	}
1415fe0bd9eeSRahul Lakkireddy 
1416fe0bd9eeSRahul Lakkireddy 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
1417fe0bd9eeSRahul Lakkireddy 		/* RMW possibly needed for first or last words.
1418fe0bd9eeSRahul Lakkireddy 		 */
1419fe0bd9eeSRahul Lakkireddy 		buf = rte_zmalloc(NULL, aligned_len, 0);
1420fe0bd9eeSRahul Lakkireddy 		if (!buf)
1421fe0bd9eeSRahul Lakkireddy 			return -ENOMEM;
1422fe0bd9eeSRahul Lakkireddy 		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1423fe0bd9eeSRahul Lakkireddy 		if (!err && aligned_len > 4)
1424fe0bd9eeSRahul Lakkireddy 			err = eeprom_rd_phys(adapter,
1425fe0bd9eeSRahul Lakkireddy 					     aligned_offset + aligned_len - 4,
1426fe0bd9eeSRahul Lakkireddy 					     (u32 *)&buf[aligned_len - 4]);
1427fe0bd9eeSRahul Lakkireddy 		if (err)
1428fe0bd9eeSRahul Lakkireddy 			goto out;
1429fe0bd9eeSRahul Lakkireddy 		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
1430fe0bd9eeSRahul Lakkireddy 			   eeprom->length);
1431fe0bd9eeSRahul Lakkireddy 	} else {
1432fe0bd9eeSRahul Lakkireddy 		buf = eeprom->data;
1433fe0bd9eeSRahul Lakkireddy 	}
1434fe0bd9eeSRahul Lakkireddy 
1435fe0bd9eeSRahul Lakkireddy 	err = t4_seeprom_wp(adapter, false);
1436fe0bd9eeSRahul Lakkireddy 	if (err)
1437fe0bd9eeSRahul Lakkireddy 		goto out;
1438fe0bd9eeSRahul Lakkireddy 
1439fe0bd9eeSRahul Lakkireddy 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1440fe0bd9eeSRahul Lakkireddy 		err = eeprom_wr_phys(adapter, aligned_offset, *p);
1441fe0bd9eeSRahul Lakkireddy 		aligned_offset += 4;
1442fe0bd9eeSRahul Lakkireddy 	}
1443fe0bd9eeSRahul Lakkireddy 
1444fe0bd9eeSRahul Lakkireddy 	if (!err)
1445fe0bd9eeSRahul Lakkireddy 		err = t4_seeprom_wp(adapter, true);
1446fe0bd9eeSRahul Lakkireddy out:
1447fe0bd9eeSRahul Lakkireddy 	if (buf != eeprom->data)
1448fe0bd9eeSRahul Lakkireddy 		rte_free(buf);
1449fe0bd9eeSRahul Lakkireddy 	return err;
1450fe0bd9eeSRahul Lakkireddy }
1451fe0bd9eeSRahul Lakkireddy 
145217ba077cSRahul Lakkireddy static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
145317ba077cSRahul Lakkireddy {
145463a97e58SStephen Hemminger 	struct port_info *pi = eth_dev->data->dev_private;
145517ba077cSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
145617ba077cSRahul Lakkireddy 
145717ba077cSRahul Lakkireddy 	return t4_get_regs_len(adapter) / sizeof(uint32_t);
145817ba077cSRahul Lakkireddy }
145917ba077cSRahul Lakkireddy 
146017ba077cSRahul Lakkireddy static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
146117ba077cSRahul Lakkireddy 			  struct rte_dev_reg_info *regs)
146217ba077cSRahul Lakkireddy {
146363a97e58SStephen Hemminger 	struct port_info *pi = eth_dev->data->dev_private;
146417ba077cSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
146517ba077cSRahul Lakkireddy 
146617ba077cSRahul Lakkireddy 	regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
146717ba077cSRahul Lakkireddy 		(CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
146817ba077cSRahul Lakkireddy 		(1 << 16);
1469001a1c0fSZyta Szpak 
1470001a1c0fSZyta Szpak 	if (regs->data == NULL) {
1471001a1c0fSZyta Szpak 		regs->length = cxgbe_get_regs_len(eth_dev);
1472001a1c0fSZyta Szpak 		regs->width = sizeof(uint32_t);
1473001a1c0fSZyta Szpak 
1474001a1c0fSZyta Szpak 		return 0;
1475001a1c0fSZyta Szpak 	}
1476001a1c0fSZyta Szpak 
147717ba077cSRahul Lakkireddy 	t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));
147817ba077cSRahul Lakkireddy 
147917ba077cSRahul Lakkireddy 	return 0;
148017ba077cSRahul Lakkireddy }
148117ba077cSRahul Lakkireddy 
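/*
 * Editor's illustrative sketch (not part of the driver): the two-call
 * pattern cxgbe_get_regs() above is written for. The first call with
 * data == NULL only fills in length/width; the caller then allocates the
 * buffer and calls again. The malloc() use and the lack of error handling
 * are simplifications of the example.
 *
 *	struct rte_dev_reg_info reg_info = { .data = NULL };
 *
 *	rte_eth_dev_get_reg_info(port_id, &reg_info);
 *	reg_info.data = malloc(reg_info.length * reg_info.width);
 *	rte_eth_dev_get_reg_info(port_id, &reg_info);
 */
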
14826d13ea8eSOlivier Matz int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
14830c4a5dfcSKumar Sanghvi {
148463a97e58SStephen Hemminger 	struct port_info *pi = dev->data->dev_private;
14850c4a5dfcSKumar Sanghvi 	int ret;
14860c4a5dfcSKumar Sanghvi 
1487fefee7a6SShagun Agrawal 	ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
14880c4a5dfcSKumar Sanghvi 	if (ret < 0) {
14890c4a5dfcSKumar Sanghvi 		dev_err(adapter, "failed to set mac addr; err = %d\n",
14900c4a5dfcSKumar Sanghvi 			ret);
1491caccf8b3SOlivier Matz 		return ret;
14920c4a5dfcSKumar Sanghvi 	}
14930c4a5dfcSKumar Sanghvi 	pi->xact_addr_filt = ret;
1494caccf8b3SOlivier Matz 	return 0;
14950c4a5dfcSKumar Sanghvi }
14960c4a5dfcSKumar Sanghvi 
149762aafe03SKarra Satwik static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
149862aafe03SKarra Satwik 					   struct rte_eth_fec_capa *capa_arr)
149962aafe03SKarra Satwik {
150062aafe03SKarra Satwik 	int num = 0;
150162aafe03SKarra Satwik 
150262aafe03SKarra Satwik 	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
150362aafe03SKarra Satwik 		if (capa_arr) {
1504295968d1SFerruh Yigit 			capa_arr[num].speed = RTE_ETH_SPEED_NUM_100G;
150562aafe03SKarra Satwik 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
150662aafe03SKarra Satwik 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
150762aafe03SKarra Satwik 		}
150862aafe03SKarra Satwik 		num++;
150962aafe03SKarra Satwik 	}
151062aafe03SKarra Satwik 
151162aafe03SKarra Satwik 	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
151262aafe03SKarra Satwik 		if (capa_arr) {
1513295968d1SFerruh Yigit 			capa_arr[num].speed = RTE_ETH_SPEED_NUM_50G;
151462aafe03SKarra Satwik 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
151562aafe03SKarra Satwik 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
151662aafe03SKarra Satwik 		}
151762aafe03SKarra Satwik 		num++;
151862aafe03SKarra Satwik 	}
151962aafe03SKarra Satwik 
152062aafe03SKarra Satwik 	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
152162aafe03SKarra Satwik 		if (capa_arr) {
1522295968d1SFerruh Yigit 			capa_arr[num].speed = RTE_ETH_SPEED_NUM_25G;
152362aafe03SKarra Satwik 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
152462aafe03SKarra Satwik 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
152562aafe03SKarra Satwik 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
152662aafe03SKarra Satwik 		}
152762aafe03SKarra Satwik 		num++;
152862aafe03SKarra Satwik 	}
152962aafe03SKarra Satwik 
153062aafe03SKarra Satwik 	return num;
153162aafe03SKarra Satwik }
153262aafe03SKarra Satwik 
153362aafe03SKarra Satwik static int cxgbe_fec_get_capability(struct rte_eth_dev *dev,
153462aafe03SKarra Satwik 				    struct rte_eth_fec_capa *speed_fec_capa,
153562aafe03SKarra Satwik 				    unsigned int num)
153662aafe03SKarra Satwik {
153762aafe03SKarra Satwik 	struct port_info *pi = dev->data->dev_private;
153862aafe03SKarra Satwik 	struct link_config *lc = &pi->link_cfg;
153962aafe03SKarra Satwik 	u8 num_entries;
154062aafe03SKarra Satwik 
154162aafe03SKarra Satwik 	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
154262aafe03SKarra Satwik 		return -EOPNOTSUPP;
154362aafe03SKarra Satwik 
154462aafe03SKarra Satwik 	num_entries = cxgbe_fec_get_capa_speed_to_fec(lc, NULL);
154562aafe03SKarra Satwik 	if (!speed_fec_capa || num < num_entries)
154662aafe03SKarra Satwik 		return num_entries;
154762aafe03SKarra Satwik 
154862aafe03SKarra Satwik 	return cxgbe_fec_get_capa_speed_to_fec(lc, speed_fec_capa);
154962aafe03SKarra Satwik }
155062aafe03SKarra Satwik 
155162aafe03SKarra Satwik static int cxgbe_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
155262aafe03SKarra Satwik {
155362aafe03SKarra Satwik 	struct port_info *pi = dev->data->dev_private;
155462aafe03SKarra Satwik 	struct link_config *lc = &pi->link_cfg;
155562aafe03SKarra Satwik 	u32 fec_caps = 0, caps = lc->link_caps;
155662aafe03SKarra Satwik 
155762aafe03SKarra Satwik 	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
155862aafe03SKarra Satwik 		return -EOPNOTSUPP;
155962aafe03SKarra Satwik 
156062aafe03SKarra Satwik 	if (caps & FW_PORT_CAP32_FEC_RS)
156162aafe03SKarra Satwik 		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
156262aafe03SKarra Satwik 	else if (caps & FW_PORT_CAP32_FEC_BASER_RS)
156362aafe03SKarra Satwik 		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
156462aafe03SKarra Satwik 	else
156562aafe03SKarra Satwik 		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
156662aafe03SKarra Satwik 
156762aafe03SKarra Satwik 	*fec_capa = fec_caps;
156862aafe03SKarra Satwik 	return 0;
156962aafe03SKarra Satwik }
157062aafe03SKarra Satwik 
157162aafe03SKarra Satwik static int cxgbe_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
157262aafe03SKarra Satwik {
157362aafe03SKarra Satwik 	struct port_info *pi = dev->data->dev_private;
157462aafe03SKarra Satwik 	u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
157562aafe03SKarra Satwik 	struct link_config *lc = &pi->link_cfg;
157662aafe03SKarra Satwik 	u32 new_caps = lc->admin_caps;
157762aafe03SKarra Satwik 	int ret;
157862aafe03SKarra Satwik 
157962aafe03SKarra Satwik 	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
158062aafe03SKarra Satwik 		return -EOPNOTSUPP;
158162aafe03SKarra Satwik 
158262aafe03SKarra Satwik 	if (!fec_capa)
158362aafe03SKarra Satwik 		return -EINVAL;
158462aafe03SKarra Satwik 
158562aafe03SKarra Satwik 	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
158662aafe03SKarra Satwik 		goto set_fec;
158762aafe03SKarra Satwik 
158862aafe03SKarra Satwik 	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC))
158962aafe03SKarra Satwik 		fec_none = 1;
159062aafe03SKarra Satwik 
159162aafe03SKarra Satwik 	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
159262aafe03SKarra Satwik 		fec_baser = 1;
159362aafe03SKarra Satwik 
159462aafe03SKarra Satwik 	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
159562aafe03SKarra Satwik 		fec_rs = 1;
159662aafe03SKarra Satwik 
159762aafe03SKarra Satwik set_fec:
159862aafe03SKarra Satwik 	ret = t4_set_link_fec(pi, fec_rs, fec_baser, fec_none, &new_caps);
159962aafe03SKarra Satwik 	if (ret != 0)
160062aafe03SKarra Satwik 		return ret;
160162aafe03SKarra Satwik 
160262aafe03SKarra Satwik 	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
160362aafe03SKarra Satwik 		new_caps |= FW_PORT_CAP32_FORCE_FEC;
160462aafe03SKarra Satwik 	else
160562aafe03SKarra Satwik 		new_caps &= ~FW_PORT_CAP32_FORCE_FEC;
160662aafe03SKarra Satwik 
160762aafe03SKarra Satwik 	if (new_caps != lc->admin_caps) {
160862aafe03SKarra Satwik 		ret = t4_link_l1cfg(pi, new_caps);
160962aafe03SKarra Satwik 		if (ret == 0)
161062aafe03SKarra Satwik 			lc->admin_caps = new_caps;
161162aafe03SKarra Satwik 	}
161262aafe03SKarra Satwik 
161362aafe03SKarra Satwik 	return ret;
161462aafe03SKarra Satwik }
161562aafe03SKarra Satwik 
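/*
 * Editor's illustrative sketch (not part of the driver): switching the link
 * to RS FEC through the callbacks above. cxgbe_fec_get() returns -EOPNOTSUPP
 * on links without FEC support, so the return-value check doubles as a
 * capability check; further error handling is omitted.
 *
 *	uint32_t fec;
 *
 *	if (rte_eth_fec_get(port_id, &fec) == 0 &&
 *	    !(fec & RTE_ETH_FEC_MODE_CAPA_MASK(RS)))
 *		rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
 */
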
16161cd22be2SNikhil Vasoya int cxgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
16171cd22be2SNikhil Vasoya 			 size_t fw_size)
16181cd22be2SNikhil Vasoya {
16191cd22be2SNikhil Vasoya 	struct port_info *pi = dev->data->dev_private;
16201cd22be2SNikhil Vasoya 	struct adapter *adapter = pi->adapter;
16211cd22be2SNikhil Vasoya 	int ret;
16221cd22be2SNikhil Vasoya 
16231cd22be2SNikhil Vasoya 	if (adapter->params.fw_vers == 0)
16241cd22be2SNikhil Vasoya 		return -EIO;
16251cd22be2SNikhil Vasoya 
16261cd22be2SNikhil Vasoya 	ret = snprintf(fw_version, fw_size, "%u.%u.%u.%u",
16271cd22be2SNikhil Vasoya 		       G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
16281cd22be2SNikhil Vasoya 		       G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
16291cd22be2SNikhil Vasoya 		       G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
16301cd22be2SNikhil Vasoya 		       G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
16311cd22be2SNikhil Vasoya 	if (ret < 0)
16321cd22be2SNikhil Vasoya 		return -EINVAL;
16331cd22be2SNikhil Vasoya 
16341cd22be2SNikhil Vasoya 	ret += 1;
16351cd22be2SNikhil Vasoya 	if (fw_size < (size_t)ret)
16361cd22be2SNikhil Vasoya 		return ret;
16371cd22be2SNikhil Vasoya 
16381cd22be2SNikhil Vasoya 	return 0;
16391cd22be2SNikhil Vasoya }
16401cd22be2SNikhil Vasoya 
164189b890dfSStephen Hemminger static const struct eth_dev_ops cxgbe_eth_dev_ops = {
16420462d115SRahul Lakkireddy 	.dev_start		= cxgbe_dev_start,
16430462d115SRahul Lakkireddy 	.dev_stop		= cxgbe_dev_stop,
16440462d115SRahul Lakkireddy 	.dev_close		= cxgbe_dev_close,
1645cdac6e2eSRahul Lakkireddy 	.promiscuous_enable	= cxgbe_dev_promiscuous_enable,
1646cdac6e2eSRahul Lakkireddy 	.promiscuous_disable	= cxgbe_dev_promiscuous_disable,
1647cdac6e2eSRahul Lakkireddy 	.allmulticast_enable	= cxgbe_dev_allmulticast_enable,
1648cdac6e2eSRahul Lakkireddy 	.allmulticast_disable	= cxgbe_dev_allmulticast_disable,
164992c8a632SRahul Lakkireddy 	.dev_configure		= cxgbe_dev_configure,
165092c8a632SRahul Lakkireddy 	.dev_infos_get		= cxgbe_dev_info_get,
165178a38edfSJianfeng Tan 	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
1652cdac6e2eSRahul Lakkireddy 	.link_update		= cxgbe_dev_link_update,
1653265af08eSRahul Lakkireddy 	.dev_set_link_up        = cxgbe_dev_set_link_up,
1654265af08eSRahul Lakkireddy 	.dev_set_link_down      = cxgbe_dev_set_link_down,
16550ec33be4SRahul Lakkireddy 	.mtu_set		= cxgbe_dev_mtu_set,
16564a01078bSRahul Lakkireddy 	.tx_queue_setup         = cxgbe_dev_tx_queue_setup,
16574a01078bSRahul Lakkireddy 	.tx_queue_start		= cxgbe_dev_tx_queue_start,
16584a01078bSRahul Lakkireddy 	.tx_queue_stop		= cxgbe_dev_tx_queue_stop,
16594a01078bSRahul Lakkireddy 	.tx_queue_release	= cxgbe_dev_tx_queue_release,
166092c8a632SRahul Lakkireddy 	.rx_queue_setup         = cxgbe_dev_rx_queue_setup,
166192c8a632SRahul Lakkireddy 	.rx_queue_start		= cxgbe_dev_rx_queue_start,
166292c8a632SRahul Lakkireddy 	.rx_queue_stop		= cxgbe_dev_rx_queue_stop,
166392c8a632SRahul Lakkireddy 	.rx_queue_release	= cxgbe_dev_rx_queue_release,
1664fb7ad441SThomas Monjalon 	.flow_ops_get           = cxgbe_dev_flow_ops_get,
1665856505d3SRahul Lakkireddy 	.stats_get		= cxgbe_dev_stats_get,
1666856505d3SRahul Lakkireddy 	.stats_reset		= cxgbe_dev_stats_reset,
166718e44206SRahul Lakkireddy 	.xstats_get             = cxgbe_dev_xstats_get,
166818e44206SRahul Lakkireddy 	.xstats_get_by_id       = cxgbe_dev_xstats_get_by_id,
166918e44206SRahul Lakkireddy 	.xstats_get_names       = cxgbe_dev_xstats_get_names,
167018e44206SRahul Lakkireddy 	.xstats_get_names_by_id = cxgbe_dev_xstats_get_names_by_id,
167118e44206SRahul Lakkireddy 	.xstats_reset           = cxgbe_dev_xstats_reset,
1672631dfc71SRahul Lakkireddy 	.flow_ctrl_get		= cxgbe_flow_ctrl_get,
1673631dfc71SRahul Lakkireddy 	.flow_ctrl_set		= cxgbe_flow_ctrl_set,
1674fe0bd9eeSRahul Lakkireddy 	.get_eeprom_length	= cxgbe_get_eeprom_length,
1675fe0bd9eeSRahul Lakkireddy 	.get_eeprom		= cxgbe_get_eeprom,
1676fe0bd9eeSRahul Lakkireddy 	.set_eeprom		= cxgbe_set_eeprom,
167717ba077cSRahul Lakkireddy 	.get_reg		= cxgbe_get_regs,
167808e21af9SKumar Sanghvi 	.rss_hash_update	= cxgbe_dev_rss_hash_update,
167976aba8d7SKumar Sanghvi 	.rss_hash_conf_get	= cxgbe_dev_rss_hash_conf_get,
16800c4a5dfcSKumar Sanghvi 	.mac_addr_set		= cxgbe_mac_addr_set,
1681f2d344dfSRahul Lakkireddy 	.reta_update            = cxgbe_dev_rss_reta_update,
1682f2d344dfSRahul Lakkireddy 	.reta_query             = cxgbe_dev_rss_reta_query,
168362aafe03SKarra Satwik 	.fec_get_capability     = cxgbe_fec_get_capability,
168462aafe03SKarra Satwik 	.fec_get                = cxgbe_fec_get,
168562aafe03SKarra Satwik 	.fec_set                = cxgbe_fec_set,
16861cd22be2SNikhil Vasoya 	.fw_version_get         = cxgbe_fw_version_get,
168783189849SRahul Lakkireddy };
168883189849SRahul Lakkireddy 
168983189849SRahul Lakkireddy /*
169083189849SRahul Lakkireddy  * Initialize driver
169183189849SRahul Lakkireddy  * It returns 0 on success.
169283189849SRahul Lakkireddy  */
169383189849SRahul Lakkireddy static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
169483189849SRahul Lakkireddy {
169583189849SRahul Lakkireddy 	struct rte_pci_device *pci_dev;
169663a97e58SStephen Hemminger 	struct port_info *pi = eth_dev->data->dev_private;
169783189849SRahul Lakkireddy 	struct adapter *adapter = NULL;
169883189849SRahul Lakkireddy 	char name[RTE_ETH_NAME_MAX_LEN];
169983189849SRahul Lakkireddy 	int err = 0;
170083189849SRahul Lakkireddy 
170183189849SRahul Lakkireddy 	CXGBE_FUNC_TRACE();
170283189849SRahul Lakkireddy 
170383189849SRahul Lakkireddy 	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
170492c8a632SRahul Lakkireddy 	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
17054a01078bSRahul Lakkireddy 	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
1706c0802544SFerruh Yigit 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1707eeefe73fSBernard Iremonger 
1708da5cf85eSKumar Sanghvi 	/* for secondary processes, we attach to ethdevs allocated by primary
1709da5cf85eSKumar Sanghvi 	 * and do minimal initialization.
1710da5cf85eSKumar Sanghvi 	 */
1711da5cf85eSKumar Sanghvi 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1712da5cf85eSKumar Sanghvi 		int i;
1713da5cf85eSKumar Sanghvi 
1714da5cf85eSKumar Sanghvi 		for (i = 1; i < MAX_NPORTS; i++) {
1715da5cf85eSKumar Sanghvi 			struct rte_eth_dev *rest_eth_dev;
1716da5cf85eSKumar Sanghvi 			char namei[RTE_ETH_NAME_MAX_LEN];
1717da5cf85eSKumar Sanghvi 
1718da5cf85eSKumar Sanghvi 			snprintf(namei, sizeof(namei), "%s_%d",
1719da5cf85eSKumar Sanghvi 				 pci_dev->device.name, i);
1720da5cf85eSKumar Sanghvi 			rest_eth_dev = rte_eth_dev_attach_secondary(namei);
1721da5cf85eSKumar Sanghvi 			if (rest_eth_dev) {
1722da5cf85eSKumar Sanghvi 				rest_eth_dev->device = &pci_dev->device;
1723da5cf85eSKumar Sanghvi 				rest_eth_dev->dev_ops =
1724da5cf85eSKumar Sanghvi 					eth_dev->dev_ops;
1725da5cf85eSKumar Sanghvi 				rest_eth_dev->rx_pkt_burst =
1726da5cf85eSKumar Sanghvi 					eth_dev->rx_pkt_burst;
1727da5cf85eSKumar Sanghvi 				rest_eth_dev->tx_pkt_burst =
1728da5cf85eSKumar Sanghvi 					eth_dev->tx_pkt_burst;
1729fbe90cddSThomas Monjalon 				rte_eth_dev_probing_finish(rest_eth_dev);
1730da5cf85eSKumar Sanghvi 			}
1731da5cf85eSKumar Sanghvi 		}
1732da5cf85eSKumar Sanghvi 		return 0;
1733da5cf85eSKumar Sanghvi 	}
1734da5cf85eSKumar Sanghvi 
173583189849SRahul Lakkireddy 	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
173683189849SRahul Lakkireddy 	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
173783189849SRahul Lakkireddy 	if (!adapter)
173883189849SRahul Lakkireddy 		return -1;
173983189849SRahul Lakkireddy 
174083189849SRahul Lakkireddy 	adapter->use_unpacked_mode = 1;
174183189849SRahul Lakkireddy 	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
174283189849SRahul Lakkireddy 	if (!adapter->regs) {
174383189849SRahul Lakkireddy 		dev_err(adapter, "%s: cannot map device registers\n", __func__);
174483189849SRahul Lakkireddy 		err = -ENOMEM;
174583189849SRahul Lakkireddy 		goto out_free_adapter;
174683189849SRahul Lakkireddy 	}
174783189849SRahul Lakkireddy 	adapter->pdev = pci_dev;
174883189849SRahul Lakkireddy 	adapter->eth_dev = eth_dev;
174983189849SRahul Lakkireddy 	pi->adapter = adapter;
175083189849SRahul Lakkireddy 
1751dd7c9f12SRahul Lakkireddy 	cxgbe_process_devargs(adapter);
1752dd7c9f12SRahul Lakkireddy 
175383189849SRahul Lakkireddy 	err = cxgbe_probe(adapter);
17541c1789ccSRahul Lakkireddy 	if (err) {
175583189849SRahul Lakkireddy 		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
175683189849SRahul Lakkireddy 			__func__, err);
17571c1789ccSRahul Lakkireddy 		goto out_free_adapter;
17581c1789ccSRahul Lakkireddy 	}
17591c1789ccSRahul Lakkireddy 
17601c1789ccSRahul Lakkireddy 	return 0;
176183189849SRahul Lakkireddy 
176283189849SRahul Lakkireddy out_free_adapter:
17631c1789ccSRahul Lakkireddy 	rte_free(adapter);
176483189849SRahul Lakkireddy 	return err;
176583189849SRahul Lakkireddy }
176683189849SRahul Lakkireddy 
1767b84bcf40SRahul Lakkireddy static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1768b84bcf40SRahul Lakkireddy {
176911df4a68SRahul Lakkireddy 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
177011df4a68SRahul Lakkireddy 	uint16_t port_id;
17718a5a0aadSThomas Monjalon 	int err = 0;
1772b84bcf40SRahul Lakkireddy 
1773b84bcf40SRahul Lakkireddy 	/* Free up other ports and all resources */
177411df4a68SRahul Lakkireddy 	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
17758a5a0aadSThomas Monjalon 		err |= rte_eth_dev_close(port_id);
177611df4a68SRahul Lakkireddy 
17778a5a0aadSThomas Monjalon 	return err == 0 ? 0 : -EIO;
1778b84bcf40SRahul Lakkireddy }
1779b84bcf40SRahul Lakkireddy 
1780fdf91e0fSJan Blunck static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1781fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
1782fdf91e0fSJan Blunck {
1783fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_probe(pci_dev,
1784fdf91e0fSJan Blunck 		sizeof(struct port_info), eth_cxgbe_dev_init);
1785fdf91e0fSJan Blunck }
1786fdf91e0fSJan Blunck 
1787fdf91e0fSJan Blunck static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
1788fdf91e0fSJan Blunck {
1789b84bcf40SRahul Lakkireddy 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
1790fdf91e0fSJan Blunck }
1791fdf91e0fSJan Blunck 
1792fdf91e0fSJan Blunck static struct rte_pci_driver rte_cxgbe_pmd = {
179383189849SRahul Lakkireddy 	.id_table = cxgb4_pci_tbl,
17944dee49c1SRahul Lakkireddy 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1795fdf91e0fSJan Blunck 	.probe = eth_cxgbe_pci_probe,
1796fdf91e0fSJan Blunck 	.remove = eth_cxgbe_pci_remove,
179783189849SRahul Lakkireddy };
179883189849SRahul Lakkireddy 
1799fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
180001f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
180106e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
1802f5b3c7b2SShagun Agrawal RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
1803fa033437SRahul Lakkireddy 			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
1804536db938SKarra Satwik 			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
1805536db938SKarra Satwik 			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
1806536db938SKarra Satwik 			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
1807eeded204SDavid Marchand RTE_LOG_REGISTER_DEFAULT(cxgbe_logtype, NOTICE);
1808eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(cxgbe_mbox_logtype, mbox, NOTICE);
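/*
 * Editor's note, a usage sketch rather than driver code: the devargs
 * registered above are appended to the PCI device on the EAL allow list.
 * Assuming the key strings behind the CXGBE_DEVARG_* macros resolve to
 * keep_ovlan, tx_mode_latency, filtermode and filtermask (as in the CXGBE
 * guide), and with 02:00.4 as a placeholder PF address, a port could be
 * started with, for example:
 *
 *	dpdk-testpmd -a 02:00.4,keep_ovlan=1,tx_mode_latency=1 -- -i
 */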
1809