/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Chelsio Communications.
 * All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "cxgbe.h"
#include "cxgbe_pfvf.h"
#include "cxgbe_flow.h"

/*
 * Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

#define PCI_VENDOR_ID_CHELSIO 0x1425

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ .vendor_id = 0, } \
	}

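/*
 * For reference, the macros above are expected to expand (via the shared
 * header included below) to roughly the following table; the actual device
 * IDs come from that header and are omitted here:
 *
 *	static const struct rte_pci_id cxgb4_pci_tbl[] = {
 *		{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, ...) },
 *		...
 *		{ .vendor_id = 0, },
 *	};
 */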
/*
 *... and the PCI ID Table itself ...
 */
#include "base/t4_pci_id_tbl.h"

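/*
 * Transmit a burst of packets on the given Tx queue. Completed Tx
 * descriptors are reclaimed before and while the burst is sent.
 */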
uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
	uint16_t pkts_sent, pkts_remain;
	uint16_t total_sent = 0;
	uint16_t idx = 0;
	int ret = 0;

	t4_os_lock(&txq->txq_lock);
	/* free up desc from already completed tx */
	reclaim_completed_tx(&txq->q);
	if (unlikely(!nb_pkts))
		goto out_unlock;

	rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
	while (total_sent < nb_pkts) {
		pkts_remain = nb_pkts - total_sent;

		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
			idx = total_sent + pkts_sent;
			if ((idx + 1) < nb_pkts)
				rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
							volatile void *));
			ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
			if (ret < 0)
				break;
		}
		if (!pkts_sent)
			break;
		total_sent += pkts_sent;
		/* reclaim as much as possible */
		reclaim_completed_tx(&txq->q);
	}

out_unlock:
	t4_os_unlock(&txq->txq_lock);
	return total_sent;
}

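/*
 * Receive a burst of packets by polling the Rx queue's response queue.
 */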
uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
	unsigned int work_done;

	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
		dev_err(adapter, "error in cxgbe poll\n");

	return work_done;
}

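/*
 * Report device capabilities: queue and descriptor limits, offload
 * capabilities, RSS parameters and supported link speeds.
 */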
int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
			struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = adapter->sge.max_ethqsets;
	device_info->max_tx_queues = adapter->sge.max_ethqsets;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_queue_offload_capa = 0UL;
	device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

	device_info->tx_queue_offload_capa = 0UL;
	device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

	device_info->reta_size = pi->rss_size;
	device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
	device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
	cxgbe_get_speed_caps(pi, &device_info->speed_capa);

	return 0;
}

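/*
 * Enable promiscuous mode on the port's virtual interface. Raw MPS TCAM
 * filters are enabled first when the hardware provides them.
 */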
int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int ret;

	if (adapter->params.rawf_size != 0) {
		ret = cxgbe_mpstcam_rawf_enable(pi);
		if (ret < 0)
			return ret;
	}

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     1, -1, 1, -1, false);
}

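/*
 * Disable promiscuous mode on the port's virtual interface.
 */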
int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int ret;

	if (adapter->params.rawf_size != 0) {
		ret = cxgbe_mpstcam_rawf_disable(pi);
		if (ret < 0)
			return ret;
	}

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     0, -1, 1, -1, false);
}

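/*
 * Enable reception of all multicast frames on the port's virtual interface.
 */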
int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     -1, 1, 1, -1, false);
}

int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
			     -1, 0, 1, -1, false);
}

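/*
 * Poll the firmware event queue for link state updates and report the
 * current link status, speed, duplex and autoneg setting to ethdev.
 */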
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
			  int wait_to_complete)
{
	struct port_info *pi = eth_dev->data->dev_private;
	unsigned int i, work_done, budget = 32;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_link new_link = { 0 };
	u8 old_link = pi->link_cfg.link_ok;
	struct sge *s = &adapter->sge;

	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
		if (!s->fw_evtq.desc)
			break;

		cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

		/* Exit if link status changed or always forced up */
		if (pi->link_cfg.link_ok != old_link ||
		    cxgbe_force_linkup(adapter))
			break;

		if (!wait_to_complete)
			break;

		rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
	}

	new_link.link_status = cxgbe_force_linkup(adapter) ?
			       ETH_LINK_UP : pi->link_cfg.link_ok;
	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);

	return rte_eth_linkstatus_set(eth_dev, &new_link);
}

/**
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	if (!s->fw_evtq.desc)
		return -ENOMEM;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already up, nothing to do */
	if (pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, true);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 1);
	return 0;
}

/**
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct port_info *pi = dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	if (!s->fw_evtq.desc)
		return -ENOMEM;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already down, nothing to do */
	if (!pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, false);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 0);
	return 0;
}

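/*
 * Update the port MTU. The new frame size is validated against the device
 * limits and the jumbo frame offload flag is updated to match.
 */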
int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	int err;
	uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	err = cxgbe_dev_info_get(eth_dev, &dev_info);
	if (err != 0)
		return err;

	/* Must accommodate at least RTE_ETHER_MIN_MTU */
	if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* set to jumbo mode if needed */
	if (new_mtu > CXGBE_ETH_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
	if (!err)
		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

	return err;
}

/*
 * Close the device: release the port's virtual interface and, once all
 * ports under this PF are closed, the adapter-wide resources.
 */
int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	u8 i;

	CXGBE_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!(adapter->flags & FULL_INIT_DONE))
		return 0;

	if (!pi->viid)
		return 0;

	cxgbe_down(pi);
	t4_sge_eth_release_queues(pi);
	t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
	pi->viid = 0;

	/* Free up the adapter-wide resources only after all the ports
	 * under this PF have been closed.
	 */
	for_each_port(adapter, i) {
		temp_pi = adap2pinfo(adapter, i);
		if (temp_pi->viid)
			return 0;
	}

	cxgbe_close(adapter);
	rte_free(adapter);

	return 0;
}

/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	CXGBE_FUNC_TRACE();

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;
	else
		eth_dev->data->scattered_rx = 0;

	cxgbe_enable_rx_queues(pi);

	err = cxgbe_setup_rss(pi);
	if (err)
		goto out;

	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	err = cxgbe_link_start(pi);
	if (err)
		goto out;

out:
	return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return 0;

	cxgbe_down(pi);

	/*
	 *  We clear queues only if both tx and rx path of the port
	 *  have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
	eth_dev->data->scattered_rx = 0;

	return 0;
}

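/*
 * Configure the device: bind the firmware event queue (and, for the PF,
 * the control Tx queues) on first use and validate the requested queue
 * counts.
 */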
int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	int err;

	CXGBE_FUNC_TRACE();

	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_RSS_HASH;

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = cxgbe_setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
		if (is_pf4(adapter)) {
			err = cxgbe_setup_sge_ctrl_txq(adapter);
			if (err)
				return err;
		}
	}

	err = cxgbe_cfg_queue_count(eth_dev);
	if (err)
		return err;

	return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_start(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_stop(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

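/*
 * Set up a Tx queue: validate the descriptor count and allocate the
 * hardware Tx queue bound to the firmware event queue.
 */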
int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	struct sge_eth_txq *txq;
	int err = 0;

	txq = &s->ethtxq[pi->first_txqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_txqset);

	/*  Free up the existing queue  */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity Checking
	 *
	 * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
	 * <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -(EINVAL);
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
		  __func__, txq->q.cntxt_id, txq->q.abs_id, err);
	return err;
}

void cxgbe_dev_tx_queue_release(void *q)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

	if (txq) {
		struct port_info *pi = (struct port_info *)
				       (txq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adapter, "%s: pi->port_id = %d; tx_queue_id = %d\n",
			  __func__, pi->port_id, txq->q.cntxt_id);

		t4_sge_eth_txq_release(adap, txq);
	}
}

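/*
 * Start the given Rx queue and mark its ethdev queue state as started.
 */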
int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq;
	int ret;

	dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_start(adap, rxq);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}

int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq;
	int ret;

	dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_stop(adap, rxq);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

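/*
 * Set up an Rx queue and its free list: validate the maximum Rx packet
 * length and descriptor count, then allocate the hardware queue.
 */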
int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
			     uint16_t queue_idx, uint16_t nb_desc,
			     unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf __rte_unused,
			     struct rte_mempool *mp)
{
	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	struct sge *s = &adapter->sge;
	unsigned int temp_nb_desc;
	int err = 0, msi_idx = 0;
	struct sge_eth_rxq *rxq;

	rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	err = cxgbe_dev_info_get(eth_dev, &dev_info);
	if (err != 0) {
		dev_err(adap, "%s: error during getting ethernet device info",
			__func__);
		return err;
	}

	/* Must accommodate at least RTE_ETHER_MIN_MTU */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/*  Free up the existing queue  */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity Checking
	 *
	 * nb_desc should be > 0 and <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -(EINVAL);
	}

	rxq->rspq.size = temp_nb_desc;
	if ((&rxq->fl) != NULL)
		rxq->fl.size = temp_nb_desc;

	/* Set to jumbo mode if necessary */
	if (pkt_len > CXGBE_ETH_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, NULL,
			       is_pf4(adapter) ?
			       t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id,
		  rxq->rspq.abs_id);
	return err;
}

void cxgbe_dev_rx_queue_release(void *q)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;

	if (rxq) {
		struct port_info *pi = (struct port_info *)
				       (rxq->rspq.eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
			  __func__, pi->port_id, rxq->rspq.cntxt_id);

		t4_sge_eth_rxq_release(adap, rxq);
	}
}

/*
 * Get port statistics.
 */
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
				struct rte_eth_stats *eth_stats)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct port_stats ps;
	unsigned int i;

	cxgbe_stats_get(pi, &ps);

	/* RX Stats */
	eth_stats->imissed  = ps.rx_ovflow0 + ps.rx_ovflow1 +
			      ps.rx_ovflow2 + ps.rx_ovflow3 +
			      ps.rx_trunc0 + ps.rx_trunc1 +
			      ps.rx_trunc2 + ps.rx_trunc3;
	eth_stats->ierrors  = ps.rx_symbol_err + ps.rx_fcs_err +
			      ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
			      ps.rx_len_err;

	/* TX Stats */
	eth_stats->opackets = ps.tx_frames;
	eth_stats->obytes   = ps.tx_octets;
	eth_stats->oerrors  = ps.tx_error_frames;

	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];

		eth_stats->ipackets += rxq->stats.pkts;
		eth_stats->ibytes += rxq->stats.rx_bytes;
	}

	return 0;
}

/*
 * Reset port statistics.
 */
static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = eth_dev->data->dev_private;
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	unsigned int i;

	cxgbe_stats_reset(pi);
	for (i = 0; i < pi->n_rx_qsets; i++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + i];

		memset(&rxq->stats, 0, sizeof(rxq->stats));
	}
	for (i = 0; i < pi->n_tx_qsets; i++) {
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + i];

		memset(&txq->stats, 0, sizeof(txq->stats));
	}

	return 0;
}

/* Store extended statistics names and their offsets in the stats structure */
struct cxgbe_dev_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct cxgbe_dev_xstats_name_off cxgbe_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct sge_eth_rx_stats, pkts)},
	{"bytes", offsetof(struct sge_eth_rx_stats, rx_bytes)},
	{"checksum_offloads", offsetof(struct sge_eth_rx_stats, rx_cso)},
	{"vlan_extractions", offsetof(struct sge_eth_rx_stats, vlan_ex)},
	{"dropped_packets", offsetof(struct sge_eth_rx_stats, rx_drops)},
};

static const struct cxgbe_dev_xstats_name_off cxgbe_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct sge_eth_tx_stats, pkts)},
	{"bytes", offsetof(struct sge_eth_tx_stats, tx_bytes)},
	{"tso_requests", offsetof(struct sge_eth_tx_stats, tso)},
	{"checksum_offloads", offsetof(struct sge_eth_tx_stats, tx_cso)},
	{"vlan_insertions", offsetof(struct sge_eth_tx_stats, vlan_ins)},
	{"packet_mapping_errors",
	 offsetof(struct sge_eth_tx_stats, mapping_err)},
	{"coalesced_wrs", offsetof(struct sge_eth_tx_stats, coal_wr)},
	{"coalesced_packets", offsetof(struct sge_eth_tx_stats, coal_pkts)},
};

static const struct cxgbe_dev_xstats_name_off cxgbe_dev_port_stats_strings[] = {
	{"tx_bytes", offsetof(struct port_stats, tx_octets)},
	{"tx_packets", offsetof(struct port_stats, tx_frames)},
	{"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
	{"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
	{"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
	{"tx_error_packets", offsetof(struct port_stats, tx_error_frames)},
	{"tx_size_64_packets", offsetof(struct port_stats, tx_frames_64)},
	{"tx_size_65_to_127_packets",
	 offsetof(struct port_stats, tx_frames_65_127)},
	{"tx_size_128_to_255_packets",
	 offsetof(struct port_stats, tx_frames_128_255)},
	{"tx_size_256_to_511_packets",
	 offsetof(struct port_stats, tx_frames_256_511)},
	{"tx_size_512_to_1023_packets",
	 offsetof(struct port_stats, tx_frames_512_1023)},
	{"tx_size_1024_to_1518_packets",
	 offsetof(struct port_stats, tx_frames_1024_1518)},
	{"tx_size_1519_to_max_packets",
	 offsetof(struct port_stats, tx_frames_1519_max)},
	{"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
	{"tx_pause_frames", offsetof(struct port_stats, tx_pause)},
	{"tx_ppp_pri0_packets", offsetof(struct port_stats, tx_ppp0)},
	{"tx_ppp_pri1_packets", offsetof(struct port_stats, tx_ppp1)},
	{"tx_ppp_pri2_packets", offsetof(struct port_stats, tx_ppp2)},
	{"tx_ppp_pri3_packets", offsetof(struct port_stats, tx_ppp3)},
	{"tx_ppp_pri4_packets", offsetof(struct port_stats, tx_ppp4)},
	{"tx_ppp_pri5_packets", offsetof(struct port_stats, tx_ppp5)},
	{"tx_ppp_pri6_packets", offsetof(struct port_stats, tx_ppp6)},
	{"tx_ppp_pri7_packets", offsetof(struct port_stats, tx_ppp7)},
	{"rx_bytes", offsetof(struct port_stats, rx_octets)},
	{"rx_packets", offsetof(struct port_stats, rx_frames)},
	{"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
	{"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
	{"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
	{"rx_too_long_packets", offsetof(struct port_stats, rx_too_long)},
	{"rx_jabber_packets", offsetof(struct port_stats, rx_jabber)},
	{"rx_fcs_error_packets", offsetof(struct port_stats, rx_fcs_err)},
	{"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
	{"rx_symbol_error_packets",
	 offsetof(struct port_stats, rx_symbol_err)},
	{"rx_short_packets", offsetof(struct port_stats, rx_runt)},
	{"rx_size_64_packets", offsetof(struct port_stats, rx_frames_64)},
	{"rx_size_65_to_127_packets",
	 offsetof(struct port_stats, rx_frames_65_127)},
	{"rx_size_128_to_255_packets",
	 offsetof(struct port_stats, rx_frames_128_255)},
	{"rx_size_256_to_511_packets",
	 offsetof(struct port_stats, rx_frames_256_511)},
	{"rx_size_512_to_1023_packets",
	 offsetof(struct port_stats, rx_frames_512_1023)},
	{"rx_size_1024_to_1518_packets",
	 offsetof(struct port_stats, rx_frames_1024_1518)},
	{"rx_size_1519_to_max_packets",
	 offsetof(struct port_stats, rx_frames_1519_max)},
	{"rx_pause_packets", offsetof(struct port_stats, rx_pause)},
	{"rx_ppp_pri0_packets", offsetof(struct port_stats, rx_ppp0)},
	{"rx_ppp_pri1_packets", offsetof(struct port_stats, rx_ppp1)},
	{"rx_ppp_pri2_packets", offsetof(struct port_stats, rx_ppp2)},
	{"rx_ppp_pri3_packets", offsetof(struct port_stats, rx_ppp3)},
	{"rx_ppp_pri4_packets", offsetof(struct port_stats, rx_ppp4)},
	{"rx_ppp_pri5_packets", offsetof(struct port_stats, rx_ppp5)},
	{"rx_ppp_pri6_packets", offsetof(struct port_stats, rx_ppp6)},
	{"rx_ppp_pri7_packets", offsetof(struct port_stats, rx_ppp7)},
	{"rx_bg0_dropped_packets", offsetof(struct port_stats, rx_ovflow0)},
	{"rx_bg1_dropped_packets", offsetof(struct port_stats, rx_ovflow1)},
	{"rx_bg2_dropped_packets", offsetof(struct port_stats, rx_ovflow2)},
	{"rx_bg3_dropped_packets", offsetof(struct port_stats, rx_ovflow3)},
	{"rx_bg0_truncated_packets", offsetof(struct port_stats, rx_trunc0)},
	{"rx_bg1_truncated_packets", offsetof(struct port_stats, rx_trunc1)},
	{"rx_bg2_truncated_packets", offsetof(struct port_stats, rx_trunc2)},
	{"rx_bg3_truncated_packets", offsetof(struct port_stats, rx_trunc3)},
};

static const struct cxgbe_dev_xstats_name_off
cxgbevf_dev_port_stats_strings[] = {
	{"tx_bytes", offsetof(struct port_stats, tx_octets)},
	{"tx_broadcast_packets", offsetof(struct port_stats, tx_bcast_frames)},
	{"tx_multicast_packets", offsetof(struct port_stats, tx_mcast_frames)},
	{"tx_unicast_packets", offsetof(struct port_stats, tx_ucast_frames)},
	{"tx_drop_packets", offsetof(struct port_stats, tx_drop)},
	{"rx_broadcast_packets", offsetof(struct port_stats, rx_bcast_frames)},
	{"rx_multicast_packets", offsetof(struct port_stats, rx_mcast_frames)},
	{"rx_unicast_packets", offsetof(struct port_stats, rx_ucast_frames)},
	{"rx_length_error_packets", offsetof(struct port_stats, rx_len_err)},
};

#define CXGBE_NB_RXQ_STATS RTE_DIM(cxgbe_dev_rxq_stats_strings)
#define CXGBE_NB_TXQ_STATS RTE_DIM(cxgbe_dev_txq_stats_strings)
#define CXGBE_NB_PORT_STATS RTE_DIM(cxgbe_dev_port_stats_strings)
#define CXGBEVF_NB_PORT_STATS RTE_DIM(cxgbevf_dev_port_stats_strings)

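/* Total number of extended statistics for a port: per-queue stats plus
 * the PF or VF port stats.
 */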
static u16 cxgbe_dev_xstats_count(struct port_info *pi)
{
	u16 count;

	count = (pi->n_tx_qsets * CXGBE_NB_TXQ_STATS) +
		(pi->n_rx_qsets * CXGBE_NB_RXQ_STATS);

	if (is_pf4(pi->adapter) != 0)
		count += CXGBE_NB_PORT_STATS;
	else
		count += CXGBEVF_NB_PORT_STATS;

	return count;
}

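/* Fill in names and/or values of all extended statistics. Returns the total
 * number of stats if the caller-supplied array is too small.
 */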
91718e44206SRahul Lakkireddy static int cxgbe_dev_xstats(struct rte_eth_dev *dev,
91818e44206SRahul Lakkireddy 			    struct rte_eth_xstat_name *xstats_names,
91918e44206SRahul Lakkireddy 			    struct rte_eth_xstat *xstats, unsigned int size)
92018e44206SRahul Lakkireddy {
92118e44206SRahul Lakkireddy 	const struct cxgbe_dev_xstats_name_off *xstats_str;
92218e44206SRahul Lakkireddy 	struct port_info *pi = dev->data->dev_private;
92318e44206SRahul Lakkireddy 	struct adapter *adap = pi->adapter;
92418e44206SRahul Lakkireddy 	struct sge *s = &adap->sge;
9255ec659a7SNikhil Vasoya 	u16 count, i, qid, nstats;
92618e44206SRahul Lakkireddy 	struct port_stats ps;
92718e44206SRahul Lakkireddy 	u64 *stats_ptr;
92818e44206SRahul Lakkireddy 
92918e44206SRahul Lakkireddy 	count = cxgbe_dev_xstats_count(pi);
93018e44206SRahul Lakkireddy 	if (size < count)
93118e44206SRahul Lakkireddy 		return count;
93218e44206SRahul Lakkireddy 
9335ec659a7SNikhil Vasoya 	if (is_pf4(adap) != 0) {
9345ec659a7SNikhil Vasoya 		/* port stats for PF*/
93518e44206SRahul Lakkireddy 		cxgbe_stats_get(pi, &ps);
9365ec659a7SNikhil Vasoya 		xstats_str = cxgbe_dev_port_stats_strings;
9375ec659a7SNikhil Vasoya 		nstats = CXGBE_NB_PORT_STATS;
9385ec659a7SNikhil Vasoya 	} else {
9395ec659a7SNikhil Vasoya 		/* port stats for VF*/
9405ec659a7SNikhil Vasoya 		cxgbevf_stats_get(pi, &ps);
9415ec659a7SNikhil Vasoya 		xstats_str = cxgbevf_dev_port_stats_strings;
9425ec659a7SNikhil Vasoya 		nstats = CXGBEVF_NB_PORT_STATS;
9435ec659a7SNikhil Vasoya 	}
94418e44206SRahul Lakkireddy 
94518e44206SRahul Lakkireddy 	count = 0;
9465ec659a7SNikhil Vasoya 	for (i = 0; i < nstats; i++, count++) {
94718e44206SRahul Lakkireddy 		if (xstats_names != NULL)
94818e44206SRahul Lakkireddy 			snprintf(xstats_names[count].name,
94918e44206SRahul Lakkireddy 				 sizeof(xstats_names[count].name),
95018e44206SRahul Lakkireddy 				 "%s", xstats_str[i].name);
95118e44206SRahul Lakkireddy 		if (xstats != NULL) {
95218e44206SRahul Lakkireddy 			stats_ptr = RTE_PTR_ADD(&ps,
95318e44206SRahul Lakkireddy 						xstats_str[i].offset);
95418e44206SRahul Lakkireddy 			xstats[count].value = *stats_ptr;
95518e44206SRahul Lakkireddy 			xstats[count].id = count;
95618e44206SRahul Lakkireddy 		}
95718e44206SRahul Lakkireddy 	}
95818e44206SRahul Lakkireddy 
95918e44206SRahul Lakkireddy 	/* per-txq stats */
96018e44206SRahul Lakkireddy 	xstats_str = cxgbe_dev_txq_stats_strings;
96118e44206SRahul Lakkireddy 	for (qid = 0; qid < pi->n_tx_qsets; qid++) {
96218e44206SRahul Lakkireddy 		struct sge_eth_txq *txq = &s->ethtxq[pi->first_txqset + qid];
96318e44206SRahul Lakkireddy 
96418e44206SRahul Lakkireddy 		for (i = 0; i < CXGBE_NB_TXQ_STATS; i++, count++) {
96518e44206SRahul Lakkireddy 			if (xstats_names != NULL)
96618e44206SRahul Lakkireddy 				snprintf(xstats_names[count].name,
96718e44206SRahul Lakkireddy 					 sizeof(xstats_names[count].name),
96818e44206SRahul Lakkireddy 					 "tx_q%u_%s",
96918e44206SRahul Lakkireddy 					 qid, xstats_str[i].name);
97018e44206SRahul Lakkireddy 			if (xstats != NULL) {
97118e44206SRahul Lakkireddy 				stats_ptr = RTE_PTR_ADD(&txq->stats,
97218e44206SRahul Lakkireddy 							xstats_str[i].offset);
97318e44206SRahul Lakkireddy 				xstats[count].value = *stats_ptr;
97418e44206SRahul Lakkireddy 				xstats[count].id = count;
97518e44206SRahul Lakkireddy 			}
97618e44206SRahul Lakkireddy 		}
97718e44206SRahul Lakkireddy 	}
97818e44206SRahul Lakkireddy 
97918e44206SRahul Lakkireddy 	/* per-rxq stats */
98018e44206SRahul Lakkireddy 	xstats_str = cxgbe_dev_rxq_stats_strings;
98118e44206SRahul Lakkireddy 	for (qid = 0; qid < pi->n_rx_qsets; qid++) {
98218e44206SRahul Lakkireddy 		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_rxqset + qid];
98318e44206SRahul Lakkireddy 
98418e44206SRahul Lakkireddy 		for (i = 0; i < CXGBE_NB_RXQ_STATS; i++, count++) {
98518e44206SRahul Lakkireddy 			if (xstats_names != NULL)
98618e44206SRahul Lakkireddy 				snprintf(xstats_names[count].name,
98718e44206SRahul Lakkireddy 					 sizeof(xstats_names[count].name),
98818e44206SRahul Lakkireddy 					 "rx_q%u_%s",
98918e44206SRahul Lakkireddy 					 qid, xstats_str[i].name);
99018e44206SRahul Lakkireddy 			if (xstats != NULL) {
99118e44206SRahul Lakkireddy 				stats_ptr = RTE_PTR_ADD(&rxq->stats,
99218e44206SRahul Lakkireddy 							xstats_str[i].offset);
99318e44206SRahul Lakkireddy 				xstats[count].value = *stats_ptr;
99418e44206SRahul Lakkireddy 				xstats[count].id = count;
99518e44206SRahul Lakkireddy 			}
99618e44206SRahul Lakkireddy 		}
99718e44206SRahul Lakkireddy 	}
99818e44206SRahul Lakkireddy 
99918e44206SRahul Lakkireddy 	return count;
100018e44206SRahul Lakkireddy }
100118e44206SRahul Lakkireddy 
100218e44206SRahul Lakkireddy /* Get port extended statistics by ID. */
10035ec659a7SNikhil Vasoya int cxgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
100418e44206SRahul Lakkireddy 			       const uint64_t *ids, uint64_t *values,
100518e44206SRahul Lakkireddy 			       unsigned int n)
100618e44206SRahul Lakkireddy {
100718e44206SRahul Lakkireddy 	struct port_info *pi = dev->data->dev_private;
100818e44206SRahul Lakkireddy 	struct rte_eth_xstat *xstats_copy;
100918e44206SRahul Lakkireddy 	u16 count, i;
101018e44206SRahul Lakkireddy 	int ret = 0;
101118e44206SRahul Lakkireddy 
101218e44206SRahul Lakkireddy 	count = cxgbe_dev_xstats_count(pi);
101318e44206SRahul Lakkireddy 	if (ids == NULL || values == NULL)
101418e44206SRahul Lakkireddy 		return count;
101518e44206SRahul Lakkireddy 
101618e44206SRahul Lakkireddy 	xstats_copy = rte_calloc(NULL, count, sizeof(*xstats_copy), 0);
101718e44206SRahul Lakkireddy 	if (xstats_copy == NULL)
101818e44206SRahul Lakkireddy 		return -ENOMEM;
101918e44206SRahul Lakkireddy 
102018e44206SRahul Lakkireddy 	cxgbe_dev_xstats(dev, NULL, xstats_copy, count);
102118e44206SRahul Lakkireddy 
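	/* Take one full snapshot of all xstats, then copy out only the IDs
	 * the caller asked for; any out-of-range ID fails the whole request
	 * with -EINVAL.
	 */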
102218e44206SRahul Lakkireddy 	for (i = 0; i < n; i++) {
102318e44206SRahul Lakkireddy 		if (ids[i] >= count) {
102418e44206SRahul Lakkireddy 			ret = -EINVAL;
102518e44206SRahul Lakkireddy 			goto out_err;
102618e44206SRahul Lakkireddy 		}
102718e44206SRahul Lakkireddy 		values[i] = xstats_copy[ids[i]].value;
102818e44206SRahul Lakkireddy 	}
102918e44206SRahul Lakkireddy 
103018e44206SRahul Lakkireddy 	ret = n;
103118e44206SRahul Lakkireddy 
103218e44206SRahul Lakkireddy out_err:
103318e44206SRahul Lakkireddy 	rte_free(xstats_copy);
103418e44206SRahul Lakkireddy 	return ret;
103518e44206SRahul Lakkireddy }
103618e44206SRahul Lakkireddy 
103718e44206SRahul Lakkireddy /* Get names of port extended statistics by ID. */
10385ec659a7SNikhil Vasoya int cxgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
103918e44206SRahul Lakkireddy 				     struct rte_eth_xstat_name *xnames,
104018e44206SRahul Lakkireddy 				     const uint64_t *ids, unsigned int n)
104118e44206SRahul Lakkireddy {
104218e44206SRahul Lakkireddy 	struct port_info *pi = dev->data->dev_private;
104318e44206SRahul Lakkireddy 	struct rte_eth_xstat_name *xnames_copy;
104418e44206SRahul Lakkireddy 	u16 count, i;
104518e44206SRahul Lakkireddy 	int ret = 0;
104618e44206SRahul Lakkireddy 
104718e44206SRahul Lakkireddy 	count = cxgbe_dev_xstats_count(pi);
104818e44206SRahul Lakkireddy 	if (ids == NULL || xnames == NULL)
104918e44206SRahul Lakkireddy 		return count;
105018e44206SRahul Lakkireddy 
105118e44206SRahul Lakkireddy 	xnames_copy = rte_calloc(NULL, count, sizeof(*xnames_copy), 0);
105218e44206SRahul Lakkireddy 	if (xnames_copy == NULL)
105318e44206SRahul Lakkireddy 		return -ENOMEM;
105418e44206SRahul Lakkireddy 
105518e44206SRahul Lakkireddy 	cxgbe_dev_xstats(dev, xnames_copy, NULL, count);
105618e44206SRahul Lakkireddy 
105718e44206SRahul Lakkireddy 	for (i = 0; i < n; i++) {
105818e44206SRahul Lakkireddy 		if (ids[i] >= count) {
105918e44206SRahul Lakkireddy 			ret = -EINVAL;
106018e44206SRahul Lakkireddy 			goto out_err;
106118e44206SRahul Lakkireddy 		}
106218e44206SRahul Lakkireddy 		rte_strlcpy(xnames[i].name, xnames_copy[ids[i]].name,
106318e44206SRahul Lakkireddy 			    sizeof(xnames[i].name));
106418e44206SRahul Lakkireddy 	}
106518e44206SRahul Lakkireddy 
106618e44206SRahul Lakkireddy 	ret = n;
106718e44206SRahul Lakkireddy 
106818e44206SRahul Lakkireddy out_err:
106918e44206SRahul Lakkireddy 	rte_free(xnames_copy);
107018e44206SRahul Lakkireddy 	return ret;
107118e44206SRahul Lakkireddy }
107218e44206SRahul Lakkireddy 
107318e44206SRahul Lakkireddy /* Get port extended statistics. */
10745ec659a7SNikhil Vasoya int cxgbe_dev_xstats_get(struct rte_eth_dev *dev,
107518e44206SRahul Lakkireddy 			 struct rte_eth_xstat *xstats, unsigned int n)
107618e44206SRahul Lakkireddy {
107718e44206SRahul Lakkireddy 	return cxgbe_dev_xstats(dev, NULL, xstats, n);
107818e44206SRahul Lakkireddy }
107918e44206SRahul Lakkireddy 
108018e44206SRahul Lakkireddy /* Get names of port extended statistics. */
10815ec659a7SNikhil Vasoya int cxgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
108218e44206SRahul Lakkireddy 			       struct rte_eth_xstat_name *xstats_names,
108318e44206SRahul Lakkireddy 			       unsigned int n)
108418e44206SRahul Lakkireddy {
108518e44206SRahul Lakkireddy 	return cxgbe_dev_xstats(dev, xstats_names, NULL, n);
108618e44206SRahul Lakkireddy }
108718e44206SRahul Lakkireddy 
108818e44206SRahul Lakkireddy /* Reset port extended statistics. */
108918e44206SRahul Lakkireddy static int cxgbe_dev_xstats_reset(struct rte_eth_dev *dev)
109018e44206SRahul Lakkireddy {
109118e44206SRahul Lakkireddy 	return cxgbe_dev_stats_reset(dev);
109218e44206SRahul Lakkireddy }
109318e44206SRahul Lakkireddy 
1094631dfc71SRahul Lakkireddy static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
1095631dfc71SRahul Lakkireddy 			       struct rte_eth_fc_conf *fc_conf)
1096631dfc71SRahul Lakkireddy {
109763a97e58SStephen Hemminger 	struct port_info *pi = eth_dev->data->dev_private;
1098631dfc71SRahul Lakkireddy 	struct link_config *lc = &pi->link_cfg;
1099a83041b1SKarra Satwik 	u8 rx_pause = 0, tx_pause = 0;
1100a83041b1SKarra Satwik 	u32 caps = lc->link_caps;
1101631dfc71SRahul Lakkireddy 
1102a83041b1SKarra Satwik 	if (caps & FW_PORT_CAP32_ANEG)
1103a83041b1SKarra Satwik 		fc_conf->autoneg = 1;
1104a83041b1SKarra Satwik 
1105a83041b1SKarra Satwik 	if (caps & FW_PORT_CAP32_FC_TX)
1106a83041b1SKarra Satwik 		tx_pause = 1;
1107a83041b1SKarra Satwik 
1108a83041b1SKarra Satwik 	if (caps & FW_PORT_CAP32_FC_RX)
1109a83041b1SKarra Satwik 		rx_pause = 1;
1110631dfc71SRahul Lakkireddy 
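	/* Map the pause capabilities reported for the link to the
	 * corresponding ethdev flow control mode.
	 */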
1111631dfc71SRahul Lakkireddy 	if (rx_pause && tx_pause)
1112631dfc71SRahul Lakkireddy 		fc_conf->mode = RTE_FC_FULL;
1113631dfc71SRahul Lakkireddy 	else if (rx_pause)
1114631dfc71SRahul Lakkireddy 		fc_conf->mode = RTE_FC_RX_PAUSE;
1115631dfc71SRahul Lakkireddy 	else if (tx_pause)
1116631dfc71SRahul Lakkireddy 		fc_conf->mode = RTE_FC_TX_PAUSE;
1117631dfc71SRahul Lakkireddy 	else
1118631dfc71SRahul Lakkireddy 		fc_conf->mode = RTE_FC_NONE;
1119631dfc71SRahul Lakkireddy 	return 0;
1120631dfc71SRahul Lakkireddy }
1121631dfc71SRahul Lakkireddy 
1122631dfc71SRahul Lakkireddy static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
1123631dfc71SRahul Lakkireddy 			       struct rte_eth_fc_conf *fc_conf)
1124631dfc71SRahul Lakkireddy {
112563a97e58SStephen Hemminger 	struct port_info *pi = eth_dev->data->dev_private;
1126631dfc71SRahul Lakkireddy 	struct link_config *lc = &pi->link_cfg;
1127a83041b1SKarra Satwik 	u32 new_caps = lc->admin_caps;
1128a83041b1SKarra Satwik 	u8 tx_pause = 0, rx_pause = 0;
1129a83041b1SKarra Satwik 	int ret;
1130631dfc71SRahul Lakkireddy 
1131a83041b1SKarra Satwik 	if (fc_conf->mode == RTE_FC_FULL) {
1132a83041b1SKarra Satwik 		tx_pause = 1;
1133a83041b1SKarra Satwik 		rx_pause = 1;
1134a83041b1SKarra Satwik 	} else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
1135a83041b1SKarra Satwik 		tx_pause = 1;
1136a83041b1SKarra Satwik 	} else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
1137a83041b1SKarra Satwik 		rx_pause = 1;
1138631dfc71SRahul Lakkireddy 	}
1139631dfc71SRahul Lakkireddy 
1140a83041b1SKarra Satwik 	ret = t4_set_link_pause(pi, fc_conf->autoneg, tx_pause,
1141a83041b1SKarra Satwik 				rx_pause, &new_caps);
1142a83041b1SKarra Satwik 	if (ret != 0)
1143a83041b1SKarra Satwik 		return ret;
1144631dfc71SRahul Lakkireddy 
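	/* With autoneg disabled, ask the firmware to force the requested
	 * pause settings when the port supports it; with autoneg enabled,
	 * leave pause selection to link negotiation.
	 */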
1145a83041b1SKarra Satwik 	if (!fc_conf->autoneg) {
1146a83041b1SKarra Satwik 		if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
1147a83041b1SKarra Satwik 			new_caps |= FW_PORT_CAP32_FORCE_PAUSE;
1148a83041b1SKarra Satwik 	} else {
1149a83041b1SKarra Satwik 		new_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
1150a83041b1SKarra Satwik 	}
1151631dfc71SRahul Lakkireddy 
1152a83041b1SKarra Satwik 	if (new_caps != lc->admin_caps) {
1153a83041b1SKarra Satwik 		ret = t4_link_l1cfg(pi, new_caps);
1154a83041b1SKarra Satwik 		if (ret == 0)
1155a83041b1SKarra Satwik 			lc->admin_caps = new_caps;
1156a83041b1SKarra Satwik 	}
1157a83041b1SKarra Satwik 
1158a83041b1SKarra Satwik 	return ret;
1159631dfc71SRahul Lakkireddy }
1160631dfc71SRahul Lakkireddy 
1161011ebc23SKumar Sanghvi const uint32_t *
116278a38edfSJianfeng Tan cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
116378a38edfSJianfeng Tan {
116478a38edfSJianfeng Tan 	static const uint32_t ptypes[] = {
116578a38edfSJianfeng Tan 		RTE_PTYPE_L3_IPV4,
116678a38edfSJianfeng Tan 		RTE_PTYPE_L3_IPV6,
116778a38edfSJianfeng Tan 		RTE_PTYPE_UNKNOWN
116878a38edfSJianfeng Tan 	};
116978a38edfSJianfeng Tan 
117078a38edfSJianfeng Tan 	if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
117178a38edfSJianfeng Tan 		return ptypes;
117278a38edfSJianfeng Tan 	return NULL;
117378a38edfSJianfeng Tan }
117478a38edfSJianfeng Tan 
117508e21af9SKumar Sanghvi /* Update RSS hash configuration
117608e21af9SKumar Sanghvi  */
117708e21af9SKumar Sanghvi static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
117808e21af9SKumar Sanghvi 				     struct rte_eth_rss_conf *rss_conf)
117908e21af9SKumar Sanghvi {
118063a97e58SStephen Hemminger 	struct port_info *pi = dev->data->dev_private;
118108e21af9SKumar Sanghvi 	struct adapter *adapter = pi->adapter;
118208e21af9SKumar Sanghvi 	int err;
118308e21af9SKumar Sanghvi 
118408e21af9SKumar Sanghvi 	err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
118508e21af9SKumar Sanghvi 	if (err)
118608e21af9SKumar Sanghvi 		return err;
118708e21af9SKumar Sanghvi 
118808e21af9SKumar Sanghvi 	pi->rss_hf = rss_conf->rss_hf;
118908e21af9SKumar Sanghvi 
119008e21af9SKumar Sanghvi 	if (rss_conf->rss_key) {
119108e21af9SKumar Sanghvi 		u32 key[10], mod_key[10];
119208e21af9SKumar Sanghvi 		int i, j;
119308e21af9SKumar Sanghvi 
119408e21af9SKumar Sanghvi 		memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
119508e21af9SKumar Sanghvi 
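		/* Reverse the word order of the 40-byte key and convert each
		 * word to big-endian before programming it through
		 * t4_write_rss_key().
		 */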
119608e21af9SKumar Sanghvi 		for (i = 9, j = 0; i >= 0; i--, j++)
119708e21af9SKumar Sanghvi 			mod_key[j] = cpu_to_be32(key[i]);
119808e21af9SKumar Sanghvi 
119908e21af9SKumar Sanghvi 		t4_write_rss_key(adapter, mod_key, -1);
120008e21af9SKumar Sanghvi 	}
120108e21af9SKumar Sanghvi 
120208e21af9SKumar Sanghvi 	return 0;
120308e21af9SKumar Sanghvi }
120408e21af9SKumar Sanghvi 
120576aba8d7SKumar Sanghvi /* Get RSS hash configuration
120676aba8d7SKumar Sanghvi  */
120776aba8d7SKumar Sanghvi static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
120876aba8d7SKumar Sanghvi 				       struct rte_eth_rss_conf *rss_conf)
120976aba8d7SKumar Sanghvi {
121063a97e58SStephen Hemminger 	struct port_info *pi = dev->data->dev_private;
121176aba8d7SKumar Sanghvi 	struct adapter *adapter = pi->adapter;
121276aba8d7SKumar Sanghvi 	u64 rss_hf = 0;
121376aba8d7SKumar Sanghvi 	u64 flags = 0;
121476aba8d7SKumar Sanghvi 	int err;
121576aba8d7SKumar Sanghvi 
121676aba8d7SKumar Sanghvi 	err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
121776aba8d7SKumar Sanghvi 				    &flags, NULL);
121876aba8d7SKumar Sanghvi 
121976aba8d7SKumar Sanghvi 	if (err)
122076aba8d7SKumar Sanghvi 		return err;
122176aba8d7SKumar Sanghvi 
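	/* Translate the firmware's per-VI RSS configuration flags back into
	 * the ethdev rss_hf bit mask.
	 */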
122276aba8d7SKumar Sanghvi 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
1223d97aa415SRahul Lakkireddy 		rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
122476aba8d7SKumar Sanghvi 		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
1225d97aa415SRahul Lakkireddy 			rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
122676aba8d7SKumar Sanghvi 	}
122776aba8d7SKumar Sanghvi 
122876aba8d7SKumar Sanghvi 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1229d97aa415SRahul Lakkireddy 		rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
123076aba8d7SKumar Sanghvi 
123176aba8d7SKumar Sanghvi 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
123276aba8d7SKumar Sanghvi 		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
123376aba8d7SKumar Sanghvi 		if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
123476aba8d7SKumar Sanghvi 			rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
123576aba8d7SKumar Sanghvi 	}
123676aba8d7SKumar Sanghvi 
123776aba8d7SKumar Sanghvi 	if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1238d97aa415SRahul Lakkireddy 		rss_hf |= CXGBE_RSS_HF_IPV4_MASK;
123976aba8d7SKumar Sanghvi 
124076aba8d7SKumar Sanghvi 	rss_conf->rss_hf = rss_hf;
124176aba8d7SKumar Sanghvi 
124276aba8d7SKumar Sanghvi 	if (rss_conf->rss_key) {
124376aba8d7SKumar Sanghvi 		u32 key[10], mod_key[10];
124476aba8d7SKumar Sanghvi 		int i, j;
124576aba8d7SKumar Sanghvi 
124676aba8d7SKumar Sanghvi 		t4_read_rss_key(adapter, key);
124776aba8d7SKumar Sanghvi 
124876aba8d7SKumar Sanghvi 		for (i = 9, j = 0; i >= 0; i--, j++)
124976aba8d7SKumar Sanghvi 			mod_key[j] = be32_to_cpu(key[i]);
125076aba8d7SKumar Sanghvi 
125176aba8d7SKumar Sanghvi 		memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
125276aba8d7SKumar Sanghvi 	}
125376aba8d7SKumar Sanghvi 
125476aba8d7SKumar Sanghvi 	return 0;
125576aba8d7SKumar Sanghvi }
125676aba8d7SKumar Sanghvi 
1257f2d344dfSRahul Lakkireddy static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
1258f2d344dfSRahul Lakkireddy 				     struct rte_eth_rss_reta_entry64 *reta_conf,
1259f2d344dfSRahul Lakkireddy 				     uint16_t reta_size)
1260f2d344dfSRahul Lakkireddy {
1261f2d344dfSRahul Lakkireddy 	struct port_info *pi = dev->data->dev_private;
1262f2d344dfSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
1263f2d344dfSRahul Lakkireddy 	u16 i, idx, shift, *rss;
1264f2d344dfSRahul Lakkireddy 	int ret;
1265f2d344dfSRahul Lakkireddy 
1266f2d344dfSRahul Lakkireddy 	if (!(adapter->flags & FULL_INIT_DONE))
1267f2d344dfSRahul Lakkireddy 		return -ENOMEM;
1268f2d344dfSRahul Lakkireddy 
1269f2d344dfSRahul Lakkireddy 	if (!reta_size || reta_size > pi->rss_size)
1270f2d344dfSRahul Lakkireddy 		return -EINVAL;
1271f2d344dfSRahul Lakkireddy 
1272f2d344dfSRahul Lakkireddy 	rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
1273f2d344dfSRahul Lakkireddy 	if (!rss)
1274f2d344dfSRahul Lakkireddy 		return -ENOMEM;
1275f2d344dfSRahul Lakkireddy 
1276f2d344dfSRahul Lakkireddy 	rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
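	/* Start from the current table and update only the entries whose bit
	 * is set in the corresponding RETA group mask.
	 */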
1277f2d344dfSRahul Lakkireddy 	for (i = 0; i < reta_size; i++) {
1278f2d344dfSRahul Lakkireddy 		idx = i / RTE_RETA_GROUP_SIZE;
1279f2d344dfSRahul Lakkireddy 		shift = i % RTE_RETA_GROUP_SIZE;
1280f2d344dfSRahul Lakkireddy 		if (!(reta_conf[idx].mask & (1ULL << shift)))
1281f2d344dfSRahul Lakkireddy 			continue;
1282f2d344dfSRahul Lakkireddy 
1283f2d344dfSRahul Lakkireddy 		rss[i] = reta_conf[idx].reta[shift];
1284f2d344dfSRahul Lakkireddy 	}
1285f2d344dfSRahul Lakkireddy 
1286f2d344dfSRahul Lakkireddy 	ret = cxgbe_write_rss(pi, rss);
1287f2d344dfSRahul Lakkireddy 	if (!ret)
1288f2d344dfSRahul Lakkireddy 		rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));
1289f2d344dfSRahul Lakkireddy 
1290f2d344dfSRahul Lakkireddy 	rte_free(rss);
1291f2d344dfSRahul Lakkireddy 	return ret;
1292f2d344dfSRahul Lakkireddy }
1293f2d344dfSRahul Lakkireddy 
1294f2d344dfSRahul Lakkireddy static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
1295f2d344dfSRahul Lakkireddy 				    struct rte_eth_rss_reta_entry64 *reta_conf,
1296f2d344dfSRahul Lakkireddy 				    uint16_t reta_size)
1297f2d344dfSRahul Lakkireddy {
1298f2d344dfSRahul Lakkireddy 	struct port_info *pi = dev->data->dev_private;
1299f2d344dfSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
1300f2d344dfSRahul Lakkireddy 	u16 i, idx, shift;
1301f2d344dfSRahul Lakkireddy 
1302f2d344dfSRahul Lakkireddy 	if (!(adapter->flags & FULL_INIT_DONE))
1303f2d344dfSRahul Lakkireddy 		return -ENOMEM;
1304f2d344dfSRahul Lakkireddy 
1305f2d344dfSRahul Lakkireddy 	if (!reta_size || reta_size > pi->rss_size)
1306f2d344dfSRahul Lakkireddy 		return -EINVAL;
1307f2d344dfSRahul Lakkireddy 
1308f2d344dfSRahul Lakkireddy 	for (i = 0; i < reta_size; i++) {
1309f2d344dfSRahul Lakkireddy 		idx = i / RTE_RETA_GROUP_SIZE;
1310f2d344dfSRahul Lakkireddy 		shift = i % RTE_RETA_GROUP_SIZE;
1311f2d344dfSRahul Lakkireddy 		if (!(reta_conf[idx].mask & (1ULL << shift)))
1312f2d344dfSRahul Lakkireddy 			continue;
1313f2d344dfSRahul Lakkireddy 
1314f2d344dfSRahul Lakkireddy 		reta_conf[idx].reta[shift] = pi->rss[i];
1315f2d344dfSRahul Lakkireddy 	}
1316f2d344dfSRahul Lakkireddy 
1317f2d344dfSRahul Lakkireddy 	return 0;
1318f2d344dfSRahul Lakkireddy }
1319f2d344dfSRahul Lakkireddy 
1320fe0bd9eeSRahul Lakkireddy static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
1321fe0bd9eeSRahul Lakkireddy {
1322fe0bd9eeSRahul Lakkireddy 	RTE_SET_USED(dev);
1323fe0bd9eeSRahul Lakkireddy 	return EEPROMSIZE;
1324fe0bd9eeSRahul Lakkireddy }
1325fe0bd9eeSRahul Lakkireddy 
1326fe0bd9eeSRahul Lakkireddy /**
1327fe0bd9eeSRahul Lakkireddy  * eeprom_ptov - translate a physical EEPROM address to virtual
1328fe0bd9eeSRahul Lakkireddy  * @phys_addr: the physical EEPROM address
1329fe0bd9eeSRahul Lakkireddy  * @fn: the PCI function number
1330fe0bd9eeSRahul Lakkireddy  * @sz: size of function-specific area
1331fe0bd9eeSRahul Lakkireddy  *
1332fe0bd9eeSRahul Lakkireddy  * Translate a physical EEPROM address to virtual.  The first 1K is
1333fe0bd9eeSRahul Lakkireddy  * accessed through virtual addresses starting at 31K; the rest is
1334fe0bd9eeSRahul Lakkireddy  * accessed through virtual addresses starting at 0.
1335fe0bd9eeSRahul Lakkireddy  *
1336fe0bd9eeSRahul Lakkireddy  * The mapping is as follows:
1337fe0bd9eeSRahul Lakkireddy  * [0..1K) -> [31K..32K)
1338fe0bd9eeSRahul Lakkireddy  * [1K..1K+A) -> [31K-A..31K)
1339fe0bd9eeSRahul Lakkireddy  * [1K+A..ES) -> [0..ES-A-1K)
1340fe0bd9eeSRahul Lakkireddy  *
1341fe0bd9eeSRahul Lakkireddy  * where A = @fn * @sz, and ES = EEPROM size.
1342fe0bd9eeSRahul Lakkireddy  */
1343fe0bd9eeSRahul Lakkireddy static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1344fe0bd9eeSRahul Lakkireddy {
1345fe0bd9eeSRahul Lakkireddy 	fn *= sz;
1346fe0bd9eeSRahul Lakkireddy 	if (phys_addr < 1024)
1347fe0bd9eeSRahul Lakkireddy 		return phys_addr + (31 << 10);
1348fe0bd9eeSRahul Lakkireddy 	if (phys_addr < 1024 + fn)
1349fe0bd9eeSRahul Lakkireddy 		return fn + phys_addr - 1024;
1350fe0bd9eeSRahul Lakkireddy 	if (phys_addr < EEPROMSIZE)
1351fe0bd9eeSRahul Lakkireddy 		return phys_addr - 1024 - fn;
1352fe0bd9eeSRahul Lakkireddy 	if (phys_addr < EEPROMVSIZE)
1353fe0bd9eeSRahul Lakkireddy 		return phys_addr - 1024;
1354fe0bd9eeSRahul Lakkireddy 	return -EINVAL;
1355fe0bd9eeSRahul Lakkireddy }
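
/*
 * Example with fn = 0 (so A = 0): the function-specific window is empty, and
 * physical [0..1K) maps to virtual [31K..32K) while physical [1K..EEPROMSIZE)
 * maps straight to virtual [0..EEPROMSIZE-1K), matching the table above.
 */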
1356fe0bd9eeSRahul Lakkireddy 
1357fe0bd9eeSRahul Lakkireddy /* The next two routines implement eeprom read/write from physical addresses.
1358fe0bd9eeSRahul Lakkireddy  */
1359fe0bd9eeSRahul Lakkireddy static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1360fe0bd9eeSRahul Lakkireddy {
1361fe0bd9eeSRahul Lakkireddy 	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1362fe0bd9eeSRahul Lakkireddy 
1363fe0bd9eeSRahul Lakkireddy 	if (vaddr >= 0)
1364fe0bd9eeSRahul Lakkireddy 		vaddr = t4_seeprom_read(adap, vaddr, v);
1365fe0bd9eeSRahul Lakkireddy 	return vaddr < 0 ? vaddr : 0;
1366fe0bd9eeSRahul Lakkireddy }
1367fe0bd9eeSRahul Lakkireddy 
1368fe0bd9eeSRahul Lakkireddy static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1369fe0bd9eeSRahul Lakkireddy {
1370fe0bd9eeSRahul Lakkireddy 	int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1371fe0bd9eeSRahul Lakkireddy 
1372fe0bd9eeSRahul Lakkireddy 	if (vaddr >= 0)
1373fe0bd9eeSRahul Lakkireddy 		vaddr = t4_seeprom_write(adap, vaddr, v);
1374fe0bd9eeSRahul Lakkireddy 	return vaddr < 0 ? vaddr : 0;
1375fe0bd9eeSRahul Lakkireddy }
1376fe0bd9eeSRahul Lakkireddy 
1377fe0bd9eeSRahul Lakkireddy #define EEPROM_MAGIC 0x38E2F10C
1378fe0bd9eeSRahul Lakkireddy 
1379fe0bd9eeSRahul Lakkireddy static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
1380fe0bd9eeSRahul Lakkireddy 			    struct rte_dev_eeprom_info *e)
1381fe0bd9eeSRahul Lakkireddy {
138263a97e58SStephen Hemminger 	struct port_info *pi = dev->data->dev_private;
1383fe0bd9eeSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
1384fe0bd9eeSRahul Lakkireddy 	u32 i, err = 0;
1385fe0bd9eeSRahul Lakkireddy 	u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);
1386fe0bd9eeSRahul Lakkireddy 
1387fe0bd9eeSRahul Lakkireddy 	if (!buf)
1388fe0bd9eeSRahul Lakkireddy 		return -ENOMEM;
1389fe0bd9eeSRahul Lakkireddy 
1390fe0bd9eeSRahul Lakkireddy 	e->magic = EEPROM_MAGIC;
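	/* Read whole 32-bit words covering the requested window, then copy
	 * out just the bytes the caller asked for.
	 */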
1391fe0bd9eeSRahul Lakkireddy 	for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
1392fe0bd9eeSRahul Lakkireddy 		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1393fe0bd9eeSRahul Lakkireddy 
1394fe0bd9eeSRahul Lakkireddy 	if (!err)
1395fe0bd9eeSRahul Lakkireddy 		rte_memcpy(e->data, buf + e->offset, e->length);
1396fe0bd9eeSRahul Lakkireddy 	rte_free(buf);
1397fe0bd9eeSRahul Lakkireddy 	return err;
1398fe0bd9eeSRahul Lakkireddy }
1399fe0bd9eeSRahul Lakkireddy 
1400fe0bd9eeSRahul Lakkireddy static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
1401fe0bd9eeSRahul Lakkireddy 			    struct rte_dev_eeprom_info *eeprom)
1402fe0bd9eeSRahul Lakkireddy {
140363a97e58SStephen Hemminger 	struct port_info *pi = dev->data->dev_private;
1404fe0bd9eeSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
1405fe0bd9eeSRahul Lakkireddy 	u8 *buf;
1406fe0bd9eeSRahul Lakkireddy 	int err = 0;
1407fe0bd9eeSRahul Lakkireddy 	u32 aligned_offset, aligned_len, *p;
1408fe0bd9eeSRahul Lakkireddy 
1409fe0bd9eeSRahul Lakkireddy 	if (eeprom->magic != EEPROM_MAGIC)
1410fe0bd9eeSRahul Lakkireddy 		return -EINVAL;
1411fe0bd9eeSRahul Lakkireddy 
1412fe0bd9eeSRahul Lakkireddy 	aligned_offset = eeprom->offset & ~3;
1413fe0bd9eeSRahul Lakkireddy 	aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;
1414fe0bd9eeSRahul Lakkireddy 
1415fe0bd9eeSRahul Lakkireddy 	if (adapter->pf > 0) {
1416fe0bd9eeSRahul Lakkireddy 		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
1417fe0bd9eeSRahul Lakkireddy 
1418fe0bd9eeSRahul Lakkireddy 		if (aligned_offset < start ||
1419fe0bd9eeSRahul Lakkireddy 		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
1420fe0bd9eeSRahul Lakkireddy 			return -EPERM;
1421fe0bd9eeSRahul Lakkireddy 	}
1422fe0bd9eeSRahul Lakkireddy 
1423fe0bd9eeSRahul Lakkireddy 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
1424fe0bd9eeSRahul Lakkireddy 		/* RMW possibly needed for first or last words.
1425fe0bd9eeSRahul Lakkireddy 		 */
1426fe0bd9eeSRahul Lakkireddy 		buf = rte_zmalloc(NULL, aligned_len, 0);
1427fe0bd9eeSRahul Lakkireddy 		if (!buf)
1428fe0bd9eeSRahul Lakkireddy 			return -ENOMEM;
1429fe0bd9eeSRahul Lakkireddy 		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1430fe0bd9eeSRahul Lakkireddy 		if (!err && aligned_len > 4)
1431fe0bd9eeSRahul Lakkireddy 			err = eeprom_rd_phys(adapter,
1432fe0bd9eeSRahul Lakkireddy 					     aligned_offset + aligned_len - 4,
1433fe0bd9eeSRahul Lakkireddy 					     (u32 *)&buf[aligned_len - 4]);
1434fe0bd9eeSRahul Lakkireddy 		if (err)
1435fe0bd9eeSRahul Lakkireddy 			goto out;
1436fe0bd9eeSRahul Lakkireddy 		rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
1437fe0bd9eeSRahul Lakkireddy 			   eeprom->length);
1438fe0bd9eeSRahul Lakkireddy 	} else {
1439fe0bd9eeSRahul Lakkireddy 		buf = eeprom->data;
1440fe0bd9eeSRahul Lakkireddy 	}
1441fe0bd9eeSRahul Lakkireddy 
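	/* Drop EEPROM write protection for the duration of the update and
	 * restore it once all words have been written.
	 */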
1442fe0bd9eeSRahul Lakkireddy 	err = t4_seeprom_wp(adapter, false);
1443fe0bd9eeSRahul Lakkireddy 	if (err)
1444fe0bd9eeSRahul Lakkireddy 		goto out;
1445fe0bd9eeSRahul Lakkireddy 
1446fe0bd9eeSRahul Lakkireddy 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1447fe0bd9eeSRahul Lakkireddy 		err = eeprom_wr_phys(adapter, aligned_offset, *p);
1448fe0bd9eeSRahul Lakkireddy 		aligned_offset += 4;
1449fe0bd9eeSRahul Lakkireddy 	}
1450fe0bd9eeSRahul Lakkireddy 
1451fe0bd9eeSRahul Lakkireddy 	if (!err)
1452fe0bd9eeSRahul Lakkireddy 		err = t4_seeprom_wp(adapter, true);
1453fe0bd9eeSRahul Lakkireddy out:
1454fe0bd9eeSRahul Lakkireddy 	if (buf != eeprom->data)
1455fe0bd9eeSRahul Lakkireddy 		rte_free(buf);
1456fe0bd9eeSRahul Lakkireddy 	return err;
1457fe0bd9eeSRahul Lakkireddy }
1458fe0bd9eeSRahul Lakkireddy 
145917ba077cSRahul Lakkireddy static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
146017ba077cSRahul Lakkireddy {
146163a97e58SStephen Hemminger 	struct port_info *pi = eth_dev->data->dev_private;
146217ba077cSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
146317ba077cSRahul Lakkireddy 
146417ba077cSRahul Lakkireddy 	return t4_get_regs_len(adapter) / sizeof(uint32_t);
146517ba077cSRahul Lakkireddy }
146617ba077cSRahul Lakkireddy 
146717ba077cSRahul Lakkireddy static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
146817ba077cSRahul Lakkireddy 			  struct rte_dev_reg_info *regs)
146917ba077cSRahul Lakkireddy {
147063a97e58SStephen Hemminger 	struct port_info *pi = eth_dev->data->dev_private;
147117ba077cSRahul Lakkireddy 	struct adapter *adapter = pi->adapter;
147217ba077cSRahul Lakkireddy 
147317ba077cSRahul Lakkireddy 	regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
147417ba077cSRahul Lakkireddy 		(CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
147517ba077cSRahul Lakkireddy 		(1 << 16);
1476001a1c0fSZyta Szpak 
1477001a1c0fSZyta Szpak 	if (regs->data == NULL) {
1478001a1c0fSZyta Szpak 		regs->length = cxgbe_get_regs_len(eth_dev);
1479001a1c0fSZyta Szpak 		regs->width = sizeof(uint32_t);
1480001a1c0fSZyta Szpak 
1481001a1c0fSZyta Szpak 		return 0;
1482001a1c0fSZyta Szpak 	}
1483001a1c0fSZyta Szpak 
148417ba077cSRahul Lakkireddy 	t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));
148517ba077cSRahul Lakkireddy 
148617ba077cSRahul Lakkireddy 	return 0;
148717ba077cSRahul Lakkireddy }
148817ba077cSRahul Lakkireddy 
14896d13ea8eSOlivier Matz int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
14900c4a5dfcSKumar Sanghvi {
149163a97e58SStephen Hemminger 	struct port_info *pi = dev->data->dev_private;
14920c4a5dfcSKumar Sanghvi 	int ret;
14930c4a5dfcSKumar Sanghvi 
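	/* Overwrite the port's current exact-match MAC entry in the MPS TCAM
	 * and remember the filter index returned for it.
	 */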
1494fefee7a6SShagun Agrawal 	ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
14950c4a5dfcSKumar Sanghvi 	if (ret < 0) {
14960c4a5dfcSKumar Sanghvi 		dev_err(adapter, "failed to set mac addr; err = %d\n",
14970c4a5dfcSKumar Sanghvi 			ret);
1498caccf8b3SOlivier Matz 		return ret;
14990c4a5dfcSKumar Sanghvi 	}
15000c4a5dfcSKumar Sanghvi 	pi->xact_addr_filt = ret;
1501caccf8b3SOlivier Matz 	return 0;
15020c4a5dfcSKumar Sanghvi }
15030c4a5dfcSKumar Sanghvi 
150462aafe03SKarra Satwik static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
150562aafe03SKarra Satwik 					   struct rte_eth_fec_capa *capa_arr)
150662aafe03SKarra Satwik {
150762aafe03SKarra Satwik 	int num = 0;
150862aafe03SKarra Satwik 
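	/* Called twice: first with capa_arr == NULL just to count the
	 * supported <speed, FEC mode> combinations, then again to fill
	 * them in.
	 */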
150962aafe03SKarra Satwik 	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
151062aafe03SKarra Satwik 		if (capa_arr) {
151162aafe03SKarra Satwik 			capa_arr[num].speed = ETH_SPEED_NUM_100G;
151262aafe03SKarra Satwik 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
151362aafe03SKarra Satwik 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
151462aafe03SKarra Satwik 		}
151562aafe03SKarra Satwik 		num++;
151662aafe03SKarra Satwik 	}
151762aafe03SKarra Satwik 
151862aafe03SKarra Satwik 	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
151962aafe03SKarra Satwik 		if (capa_arr) {
152062aafe03SKarra Satwik 			capa_arr[num].speed = ETH_SPEED_NUM_50G;
152162aafe03SKarra Satwik 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
152262aafe03SKarra Satwik 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
152362aafe03SKarra Satwik 		}
152462aafe03SKarra Satwik 		num++;
152562aafe03SKarra Satwik 	}
152662aafe03SKarra Satwik 
152762aafe03SKarra Satwik 	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
152862aafe03SKarra Satwik 		if (capa_arr) {
152962aafe03SKarra Satwik 			capa_arr[num].speed = ETH_SPEED_NUM_25G;
153062aafe03SKarra Satwik 			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
153162aafe03SKarra Satwik 					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
153262aafe03SKarra Satwik 					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
153362aafe03SKarra Satwik 		}
153462aafe03SKarra Satwik 		num++;
153562aafe03SKarra Satwik 	}
153662aafe03SKarra Satwik 
153762aafe03SKarra Satwik 	return num;
153862aafe03SKarra Satwik }
153962aafe03SKarra Satwik 
154062aafe03SKarra Satwik static int cxgbe_fec_get_capability(struct rte_eth_dev *dev,
154162aafe03SKarra Satwik 				    struct rte_eth_fec_capa *speed_fec_capa,
154262aafe03SKarra Satwik 				    unsigned int num)
154362aafe03SKarra Satwik {
154462aafe03SKarra Satwik 	struct port_info *pi = dev->data->dev_private;
154562aafe03SKarra Satwik 	struct link_config *lc = &pi->link_cfg;
154662aafe03SKarra Satwik 	u8 num_entries;
154762aafe03SKarra Satwik 
154862aafe03SKarra Satwik 	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
154962aafe03SKarra Satwik 		return -EOPNOTSUPP;
155062aafe03SKarra Satwik 
155162aafe03SKarra Satwik 	num_entries = cxgbe_fec_get_capa_speed_to_fec(lc, NULL);
155262aafe03SKarra Satwik 	if (!speed_fec_capa || num < num_entries)
155362aafe03SKarra Satwik 		return num_entries;
155462aafe03SKarra Satwik 
155562aafe03SKarra Satwik 	return cxgbe_fec_get_capa_speed_to_fec(lc, speed_fec_capa);
155662aafe03SKarra Satwik }
155762aafe03SKarra Satwik 
155862aafe03SKarra Satwik static int cxgbe_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
155962aafe03SKarra Satwik {
156062aafe03SKarra Satwik 	struct port_info *pi = dev->data->dev_private;
156162aafe03SKarra Satwik 	struct link_config *lc = &pi->link_cfg;
156262aafe03SKarra Satwik 	u32 fec_caps = 0, caps = lc->link_caps;
156362aafe03SKarra Satwik 
156462aafe03SKarra Satwik 	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
156562aafe03SKarra Satwik 		return -EOPNOTSUPP;
156662aafe03SKarra Satwik 
156762aafe03SKarra Satwik 	if (caps & FW_PORT_CAP32_FEC_RS)
156862aafe03SKarra Satwik 		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
156962aafe03SKarra Satwik 	else if (caps & FW_PORT_CAP32_FEC_BASER_RS)
157062aafe03SKarra Satwik 		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
157162aafe03SKarra Satwik 	else
157262aafe03SKarra Satwik 		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
157362aafe03SKarra Satwik 
157462aafe03SKarra Satwik 	*fec_capa = fec_caps;
157562aafe03SKarra Satwik 	return 0;
157662aafe03SKarra Satwik }
157762aafe03SKarra Satwik 
157862aafe03SKarra Satwik static int cxgbe_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
157962aafe03SKarra Satwik {
158062aafe03SKarra Satwik 	struct port_info *pi = dev->data->dev_private;
158162aafe03SKarra Satwik 	u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
158262aafe03SKarra Satwik 	struct link_config *lc = &pi->link_cfg;
158362aafe03SKarra Satwik 	u32 new_caps = lc->admin_caps;
158462aafe03SKarra Satwik 	int ret;
158562aafe03SKarra Satwik 
158662aafe03SKarra Satwik 	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
158762aafe03SKarra Satwik 		return -EOPNOTSUPP;
158862aafe03SKarra Satwik 
158962aafe03SKarra Satwik 	if (!fec_capa)
159062aafe03SKarra Satwik 		return -EINVAL;
159162aafe03SKarra Satwik 
159262aafe03SKarra Satwik 	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
159362aafe03SKarra Satwik 		goto set_fec;
159462aafe03SKarra Satwik 
159562aafe03SKarra Satwik 	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC))
159662aafe03SKarra Satwik 		fec_none = 1;
159762aafe03SKarra Satwik 
159862aafe03SKarra Satwik 	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
159962aafe03SKarra Satwik 		fec_baser = 1;
160062aafe03SKarra Satwik 
160162aafe03SKarra Satwik 	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
160262aafe03SKarra Satwik 		fec_rs = 1;
160362aafe03SKarra Satwik 
160462aafe03SKarra Satwik set_fec:
160562aafe03SKarra Satwik 	ret = t4_set_link_fec(pi, fec_rs, fec_baser, fec_none, &new_caps);
160662aafe03SKarra Satwik 	if (ret != 0)
160762aafe03SKarra Satwik 		return ret;
160862aafe03SKarra Satwik 
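	/* When the port supports it, ask the firmware to force the chosen
	 * FEC mode instead of leaving the choice to autonegotiation.
	 */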
160962aafe03SKarra Satwik 	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
161062aafe03SKarra Satwik 		new_caps |= FW_PORT_CAP32_FORCE_FEC;
161162aafe03SKarra Satwik 	else
161262aafe03SKarra Satwik 		new_caps &= ~FW_PORT_CAP32_FORCE_FEC;
161362aafe03SKarra Satwik 
161462aafe03SKarra Satwik 	if (new_caps != lc->admin_caps) {
161562aafe03SKarra Satwik 		ret = t4_link_l1cfg(pi, new_caps);
161662aafe03SKarra Satwik 		if (ret == 0)
161762aafe03SKarra Satwik 			lc->admin_caps = new_caps;
161862aafe03SKarra Satwik 	}
161962aafe03SKarra Satwik 
162062aafe03SKarra Satwik 	return ret;
162162aafe03SKarra Satwik }
162262aafe03SKarra Satwik 
1623*1cd22be2SNikhil Vasoya int cxgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
1624*1cd22be2SNikhil Vasoya 			 size_t fw_size)
1625*1cd22be2SNikhil Vasoya {
1626*1cd22be2SNikhil Vasoya 	struct port_info *pi = dev->data->dev_private;
1627*1cd22be2SNikhil Vasoya 	struct adapter *adapter = pi->adapter;
1628*1cd22be2SNikhil Vasoya 	int ret;
1629*1cd22be2SNikhil Vasoya 
1630*1cd22be2SNikhil Vasoya 	if (adapter->params.fw_vers == 0)
1631*1cd22be2SNikhil Vasoya 		return -EIO;
1632*1cd22be2SNikhil Vasoya 
1633*1cd22be2SNikhil Vasoya 	ret = snprintf(fw_version, fw_size, "%u.%u.%u.%u",
1634*1cd22be2SNikhil Vasoya 		       G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
1635*1cd22be2SNikhil Vasoya 		       G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
1636*1cd22be2SNikhil Vasoya 		       G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
1637*1cd22be2SNikhil Vasoya 		       G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
1638*1cd22be2SNikhil Vasoya 	if (ret < 0)
1639*1cd22be2SNikhil Vasoya 		return -EINVAL;
1640*1cd22be2SNikhil Vasoya 
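	/* Account for the terminating NUL byte; if the caller's buffer is
	 * too small, return the size that would be needed (ethdev
	 * convention).
	 */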
1641*1cd22be2SNikhil Vasoya 	ret += 1;
1642*1cd22be2SNikhil Vasoya 	if (fw_size < (size_t)ret)
1643*1cd22be2SNikhil Vasoya 		return ret;
1644*1cd22be2SNikhil Vasoya 
1645*1cd22be2SNikhil Vasoya 	return 0;
1646*1cd22be2SNikhil Vasoya }
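
/*
 * Typical application-side usage of this callback, via the ethdev API
 * (illustrative sketch only):
 *
 *	char fw_ver[32];
 *
 *	if (rte_eth_dev_fw_version_get(port_id, fw_ver, sizeof(fw_ver)) == 0)
 *		printf("firmware version: %s\n", fw_ver);
 */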
1647*1cd22be2SNikhil Vasoya 
164889b890dfSStephen Hemminger static const struct eth_dev_ops cxgbe_eth_dev_ops = {
16490462d115SRahul Lakkireddy 	.dev_start		= cxgbe_dev_start,
16500462d115SRahul Lakkireddy 	.dev_stop		= cxgbe_dev_stop,
16510462d115SRahul Lakkireddy 	.dev_close		= cxgbe_dev_close,
1652cdac6e2eSRahul Lakkireddy 	.promiscuous_enable	= cxgbe_dev_promiscuous_enable,
1653cdac6e2eSRahul Lakkireddy 	.promiscuous_disable	= cxgbe_dev_promiscuous_disable,
1654cdac6e2eSRahul Lakkireddy 	.allmulticast_enable	= cxgbe_dev_allmulticast_enable,
1655cdac6e2eSRahul Lakkireddy 	.allmulticast_disable	= cxgbe_dev_allmulticast_disable,
165692c8a632SRahul Lakkireddy 	.dev_configure		= cxgbe_dev_configure,
165792c8a632SRahul Lakkireddy 	.dev_infos_get		= cxgbe_dev_info_get,
165878a38edfSJianfeng Tan 	.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
1659cdac6e2eSRahul Lakkireddy 	.link_update		= cxgbe_dev_link_update,
1660265af08eSRahul Lakkireddy 	.dev_set_link_up        = cxgbe_dev_set_link_up,
1661265af08eSRahul Lakkireddy 	.dev_set_link_down      = cxgbe_dev_set_link_down,
16620ec33be4SRahul Lakkireddy 	.mtu_set		= cxgbe_dev_mtu_set,
16634a01078bSRahul Lakkireddy 	.tx_queue_setup         = cxgbe_dev_tx_queue_setup,
16644a01078bSRahul Lakkireddy 	.tx_queue_start		= cxgbe_dev_tx_queue_start,
16654a01078bSRahul Lakkireddy 	.tx_queue_stop		= cxgbe_dev_tx_queue_stop,
16664a01078bSRahul Lakkireddy 	.tx_queue_release	= cxgbe_dev_tx_queue_release,
166792c8a632SRahul Lakkireddy 	.rx_queue_setup         = cxgbe_dev_rx_queue_setup,
166892c8a632SRahul Lakkireddy 	.rx_queue_start		= cxgbe_dev_rx_queue_start,
166992c8a632SRahul Lakkireddy 	.rx_queue_stop		= cxgbe_dev_rx_queue_stop,
167092c8a632SRahul Lakkireddy 	.rx_queue_release	= cxgbe_dev_rx_queue_release,
1671fb7ad441SThomas Monjalon 	.flow_ops_get           = cxgbe_dev_flow_ops_get,
1672856505d3SRahul Lakkireddy 	.stats_get		= cxgbe_dev_stats_get,
1673856505d3SRahul Lakkireddy 	.stats_reset		= cxgbe_dev_stats_reset,
167418e44206SRahul Lakkireddy 	.xstats_get             = cxgbe_dev_xstats_get,
167518e44206SRahul Lakkireddy 	.xstats_get_by_id       = cxgbe_dev_xstats_get_by_id,
167618e44206SRahul Lakkireddy 	.xstats_get_names       = cxgbe_dev_xstats_get_names,
167718e44206SRahul Lakkireddy 	.xstats_get_names_by_id = cxgbe_dev_xstats_get_names_by_id,
167818e44206SRahul Lakkireddy 	.xstats_reset           = cxgbe_dev_xstats_reset,
1679631dfc71SRahul Lakkireddy 	.flow_ctrl_get		= cxgbe_flow_ctrl_get,
1680631dfc71SRahul Lakkireddy 	.flow_ctrl_set		= cxgbe_flow_ctrl_set,
1681fe0bd9eeSRahul Lakkireddy 	.get_eeprom_length	= cxgbe_get_eeprom_length,
1682fe0bd9eeSRahul Lakkireddy 	.get_eeprom		= cxgbe_get_eeprom,
1683fe0bd9eeSRahul Lakkireddy 	.set_eeprom		= cxgbe_set_eeprom,
168417ba077cSRahul Lakkireddy 	.get_reg		= cxgbe_get_regs,
168508e21af9SKumar Sanghvi 	.rss_hash_update	= cxgbe_dev_rss_hash_update,
168676aba8d7SKumar Sanghvi 	.rss_hash_conf_get	= cxgbe_dev_rss_hash_conf_get,
16870c4a5dfcSKumar Sanghvi 	.mac_addr_set		= cxgbe_mac_addr_set,
1688f2d344dfSRahul Lakkireddy 	.reta_update            = cxgbe_dev_rss_reta_update,
1689f2d344dfSRahul Lakkireddy 	.reta_query             = cxgbe_dev_rss_reta_query,
169062aafe03SKarra Satwik 	.fec_get_capability     = cxgbe_fec_get_capability,
169162aafe03SKarra Satwik 	.fec_get                = cxgbe_fec_get,
169262aafe03SKarra Satwik 	.fec_set                = cxgbe_fec_set,
1693*1cd22be2SNikhil Vasoya 	.fw_version_get         = cxgbe_fw_version_get,
169483189849SRahul Lakkireddy };
169583189849SRahul Lakkireddy 
169683189849SRahul Lakkireddy /*
169783189849SRahul Lakkireddy  * Initialize driver
169883189849SRahul Lakkireddy  * It returns 0 on success.
169983189849SRahul Lakkireddy  */
170083189849SRahul Lakkireddy static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
170183189849SRahul Lakkireddy {
170283189849SRahul Lakkireddy 	struct rte_pci_device *pci_dev;
170363a97e58SStephen Hemminger 	struct port_info *pi = eth_dev->data->dev_private;
170483189849SRahul Lakkireddy 	struct adapter *adapter = NULL;
170583189849SRahul Lakkireddy 	char name[RTE_ETH_NAME_MAX_LEN];
170683189849SRahul Lakkireddy 	int err = 0;
170783189849SRahul Lakkireddy 
170883189849SRahul Lakkireddy 	CXGBE_FUNC_TRACE();
170983189849SRahul Lakkireddy 
171083189849SRahul Lakkireddy 	eth_dev->dev_ops = &cxgbe_eth_dev_ops;
171192c8a632SRahul Lakkireddy 	eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
17124a01078bSRahul Lakkireddy 	eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
1713c0802544SFerruh Yigit 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1714eeefe73fSBernard Iremonger 
1715da5cf85eSKumar Sanghvi 	/* For secondary processes, we attach to the ethdevs allocated by the
1716da5cf85eSKumar Sanghvi 	 * primary process and do minimal initialization.
1717da5cf85eSKumar Sanghvi 	 */
1718da5cf85eSKumar Sanghvi 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1719da5cf85eSKumar Sanghvi 		int i;
1720da5cf85eSKumar Sanghvi 
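		/* The first port is attached by the generic PCI probe; look
		 * up the remaining ports, which the primary process
		 * registered as "<PCI device name>_<port index>".
		 */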
1721da5cf85eSKumar Sanghvi 		for (i = 1; i < MAX_NPORTS; i++) {
1722da5cf85eSKumar Sanghvi 			struct rte_eth_dev *rest_eth_dev;
1723da5cf85eSKumar Sanghvi 			char namei[RTE_ETH_NAME_MAX_LEN];
1724da5cf85eSKumar Sanghvi 
1725da5cf85eSKumar Sanghvi 			snprintf(namei, sizeof(namei), "%s_%d",
1726da5cf85eSKumar Sanghvi 				 pci_dev->device.name, i);
1727da5cf85eSKumar Sanghvi 			rest_eth_dev = rte_eth_dev_attach_secondary(namei);
1728da5cf85eSKumar Sanghvi 			if (rest_eth_dev) {
1729da5cf85eSKumar Sanghvi 				rest_eth_dev->device = &pci_dev->device;
1730da5cf85eSKumar Sanghvi 				rest_eth_dev->dev_ops =
1731da5cf85eSKumar Sanghvi 					eth_dev->dev_ops;
1732da5cf85eSKumar Sanghvi 				rest_eth_dev->rx_pkt_burst =
1733da5cf85eSKumar Sanghvi 					eth_dev->rx_pkt_burst;
1734da5cf85eSKumar Sanghvi 				rest_eth_dev->tx_pkt_burst =
1735da5cf85eSKumar Sanghvi 					eth_dev->tx_pkt_burst;
1736fbe90cddSThomas Monjalon 				rte_eth_dev_probing_finish(rest_eth_dev);
1737da5cf85eSKumar Sanghvi 			}
1738da5cf85eSKumar Sanghvi 		}
1739da5cf85eSKumar Sanghvi 		return 0;
1740da5cf85eSKumar Sanghvi 	}
1741da5cf85eSKumar Sanghvi 
174283189849SRahul Lakkireddy 	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
174383189849SRahul Lakkireddy 	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
174483189849SRahul Lakkireddy 	if (!adapter)
174583189849SRahul Lakkireddy 		return -1;
174683189849SRahul Lakkireddy 
174783189849SRahul Lakkireddy 	adapter->use_unpacked_mode = 1;
174883189849SRahul Lakkireddy 	adapter->regs = (void *)pci_dev->mem_resource[0].addr;
174983189849SRahul Lakkireddy 	if (!adapter->regs) {
175083189849SRahul Lakkireddy 		dev_err(adapter, "%s: cannot map device registers\n", __func__);
175183189849SRahul Lakkireddy 		err = -ENOMEM;
175283189849SRahul Lakkireddy 		goto out_free_adapter;
175383189849SRahul Lakkireddy 	}
175483189849SRahul Lakkireddy 	adapter->pdev = pci_dev;
175583189849SRahul Lakkireddy 	adapter->eth_dev = eth_dev;
175683189849SRahul Lakkireddy 	pi->adapter = adapter;
175783189849SRahul Lakkireddy 
1758dd7c9f12SRahul Lakkireddy 	cxgbe_process_devargs(adapter);
1759dd7c9f12SRahul Lakkireddy 
176083189849SRahul Lakkireddy 	err = cxgbe_probe(adapter);
17611c1789ccSRahul Lakkireddy 	if (err) {
176283189849SRahul Lakkireddy 		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
176383189849SRahul Lakkireddy 			__func__, err);
17641c1789ccSRahul Lakkireddy 		goto out_free_adapter;
17651c1789ccSRahul Lakkireddy 	}
17661c1789ccSRahul Lakkireddy 
17671c1789ccSRahul Lakkireddy 	return 0;
176883189849SRahul Lakkireddy 
176983189849SRahul Lakkireddy out_free_adapter:
17701c1789ccSRahul Lakkireddy 	rte_free(adapter);
177183189849SRahul Lakkireddy 	return err;
177283189849SRahul Lakkireddy }
177383189849SRahul Lakkireddy 
1774b84bcf40SRahul Lakkireddy static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1775b84bcf40SRahul Lakkireddy {
177611df4a68SRahul Lakkireddy 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
177711df4a68SRahul Lakkireddy 	uint16_t port_id;
17788a5a0aadSThomas Monjalon 	int err = 0;
1779b84bcf40SRahul Lakkireddy 
1780b84bcf40SRahul Lakkireddy 	/* Free up other ports and all resources */
178111df4a68SRahul Lakkireddy 	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
17828a5a0aadSThomas Monjalon 		err |= rte_eth_dev_close(port_id);
178311df4a68SRahul Lakkireddy 
17848a5a0aadSThomas Monjalon 	return err == 0 ? 0 : -EIO;
1785b84bcf40SRahul Lakkireddy }
1786b84bcf40SRahul Lakkireddy 
1787fdf91e0fSJan Blunck static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1788fdf91e0fSJan Blunck 	struct rte_pci_device *pci_dev)
1789fdf91e0fSJan Blunck {
1790fdf91e0fSJan Blunck 	return rte_eth_dev_pci_generic_probe(pci_dev,
1791fdf91e0fSJan Blunck 		sizeof(struct port_info), eth_cxgbe_dev_init);
1792fdf91e0fSJan Blunck }
1793fdf91e0fSJan Blunck 
1794fdf91e0fSJan Blunck static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
1795fdf91e0fSJan Blunck {
1796b84bcf40SRahul Lakkireddy 	return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
1797fdf91e0fSJan Blunck }
1798fdf91e0fSJan Blunck 
1799fdf91e0fSJan Blunck static struct rte_pci_driver rte_cxgbe_pmd = {
180083189849SRahul Lakkireddy 	.id_table = cxgb4_pci_tbl,
18014dee49c1SRahul Lakkireddy 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1802fdf91e0fSJan Blunck 	.probe = eth_cxgbe_pci_probe,
1803fdf91e0fSJan Blunck 	.remove = eth_cxgbe_pci_remove,
180483189849SRahul Lakkireddy };
180583189849SRahul Lakkireddy 
1806fdf91e0fSJan Blunck RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
180701f19227SShreyansh Jain RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
180806e81dc9SDavid Marchand RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
1809f5b3c7b2SShagun Agrawal RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
1810fa033437SRahul Lakkireddy 			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
1811536db938SKarra Satwik 			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
1812536db938SKarra Satwik 			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
1813536db938SKarra Satwik 			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
1814eeded204SDavid Marchand RTE_LOG_REGISTER_DEFAULT(cxgbe_logtype, NOTICE);
1815eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(cxgbe_mbox_logtype, mbox, NOTICE);
1816