/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgbe.h"

/*
 * Figure out how many Ports and Queue Sets we can support.  This depends on
 * knowing our Virtual Function Resources and may be called a second time if
 * we fall back from MSI-X to MSI Interrupt Mode.
 */
static void size_nports_qsets(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	unsigned int ethqsets, pmask_nports;

	/*
	 * The number of "ports" which we support is equal to the number of
	 * Virtual Interfaces with which we've been provisioned.
	 */
	adapter->params.nports = vfres->nvi;
	if (adapter->params.nports > MAX_NPORTS) {
		dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
			 " allowed virtual interfaces\n", MAX_NPORTS,
			 adapter->params.nports);
		adapter->params.nports = MAX_NPORTS;
	}

	/*
	 * We may have been provisioned with more VIs than the number of
	 * ports we're allowed to access (our Port Access Rights Mask).
	 * This is obviously a configuration conflict but we don't want to
	 * do anything silly just because of that.
	 */
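	/*
	 * For illustration (hypothetical values): a Port Access Rights Mask
	 * of 0x5 grants access to ports 0 and 2, so hweight32(0x5) == 2 and
	 * any provisioning beyond two VIs gets clamped below.
	 */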
	pmask_nports = hweight32(adapter->params.vfres.pmask);
	if (pmask_nports < adapter->params.nports) {
		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
			 " virtual interfaces; limited by Port Access Rights"
			 " mask %#x\n", pmask_nports, adapter->params.nports,
			 adapter->params.vfres.pmask);
		adapter->params.nports = pmask_nports;
	}

	/*
	 * We need to reserve an Ingress Queue for the Asynchronous Firmware
	 * Event Queue.
	 *
	 * For each Queue Set, we'll need the ability to allocate two Egress
	 * Contexts -- one for the Ingress Queue Free List and one for the TX
	 * Ethernet Queue.
	 */
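	/*
	 * A worked example with hypothetical resource counts: niqflint = 9
	 * leaves ethqsets = 9 - 1 = 8 after reserving the Firmware Event
	 * Queue; with nethctrl = 8 no further reduction is needed; but
	 * neq = 14 < 8 * 2 clamps ethqsets to 14 / 2 = 7 Queue Sets (still
	 * subject to the MAX_ETH_QSETS cap below).
	 */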
	ethqsets = vfres->niqflint - 1;
	if (vfres->nethctrl != ethqsets)
		ethqsets = min(vfres->nethctrl, ethqsets);
	if (vfres->neq < ethqsets * 2)
		ethqsets = vfres->neq / 2;
	if (ethqsets > MAX_ETH_QSETS)
		ethqsets = MAX_ETH_QSETS;
	adapter->sge.max_ethqsets = ethqsets;

	if (adapter->sge.max_ethqsets < adapter->params.nports) {
		dev_warn(adapter->pdev_dev, "only using %d of %d available"
			 " virtual interfaces (too few Queue Sets)\n",
			 adapter->sge.max_ethqsets, adapter->params.nports);
		adapter->params.nports = adapter->sge.max_ethqsets;
	}
}

static int adap_init0vf(struct adapter *adapter)
{
	u32 param, val = 0;
	int err;

	err = t4vf_fw_reset(adapter);
	if (err < 0) {
		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
		return err;
	}

	/*
	 * Grab basic operational parameters.  These will predominantly have
	 * been set up by the Physical Function Driver or will be hard coded
	 * into the adapter.  We just have to live with them ...  Note that
	 * we _must_ get our VPD parameters before our SGE parameters because
	 * we need to know the adapter's core clock from the VPD in order to
	 * properly decode the SGE Timer Values.
	 */
	err = t4vf_get_dev_params(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
			" device parameters: err=%d\n", err);
		return err;
	}

	err = t4vf_get_vpd_params(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
			" VPD parameters: err=%d\n", err);
		return err;
	}

	adapter->pf = t4vf_get_pf_from_vf(adapter);

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
		V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
	val = 1;
	t4vf_set_params(adapter, 1, &param, &val);
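	/*
	 * Note that the return value of t4vf_set_params() is not checked
	 * here: if the firmware rejects the parameter we simply continue
	 * with unencapsulated messages, as described above.
	 */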

	/*
	 * Grab our Virtual Interface resource allocation, extract the
	 * features that we're interested in and do a bit of sanity testing on
	 * what we discover.
	 */
	err = t4vf_get_vfres(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to get virtual interface"
			" resources: err=%d\n", err);
		return err;
	}

	/*
	 * Check for various parameter sanity issues.
	 */
	if (adapter->params.vfres.pmask == 0) {
		dev_err(adapter->pdev_dev, "no port access configured/"
			"usable!\n");
		return -EINVAL;
	}
	if (adapter->params.vfres.nvi == 0) {
		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
			"usable!\n");
		return -EINVAL;
	}

	/*
	 * Initialize nports and max_ethqsets now that we have our Virtual
	 * Function Resources.
	 */
	size_nports_qsets(adapter);
	adapter->flags |= FW_OK;
	return 0;
}

int cxgbevf_probe(struct adapter *adapter)
{
	struct port_info *pi;
	unsigned int pmask;
	int err = 0;
	int i;

	t4_os_lock_init(&adapter->mbox_lock);
	TAILQ_INIT(&adapter->mbox_list);
	err = t4vf_prep_adapter(adapter);
	if (err)
		return err;

	if (!is_t4(adapter->params.chip)) {
		adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
		if (!adapter->bar2) {
			dev_err(adapter, "cannot map device bar2 region\n");
			err = -ENOMEM;
			return err;
		}
	}

	err = adap_init0vf(adapter);
	if (err) {
		dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
				__func__, err);
		goto out_free;
	}

	pmask = adapter->params.vfres.pmask;
	for_each_port(adapter, i) {
		const unsigned int numa_node = rte_socket_id();
		char name[RTE_ETH_NAME_MAX_LEN];
		struct rte_eth_dev *eth_dev;
		int port_id;

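		/*
		 * Walk the Port Access Rights Mask one set bit at a time;
		 * ffs() returns the lowest set bit (1-based), so a
		 * hypothetical pmask of 0x5 would yield port_id 0 and then
		 * port_id 2 before the loop stops.
		 */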
		if (pmask == 0)
			break;
		port_id = ffs(pmask) - 1;
		pmask &= ~(1 << port_id);

		snprintf(name, sizeof(name), "%s_%d",
			 adapter->pdev->device.name, i);

		if (i == 0) {
			/* First port is already allocated by DPDK */
			eth_dev = adapter->eth_dev;
			goto allocate_mac;
		}

		/*
		 * now do all data allocation - for eth_dev structure,
		 * and internal (private) data for the remaining ports
		 */

		/* reserve an ethdev entry */
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev) {
			err = -ENOMEM;
			goto out_free;
		}
		eth_dev->data->dev_private =
			rte_zmalloc_socket(name, sizeof(struct port_info),
					   RTE_CACHE_LINE_SIZE, numa_node);
		if (!eth_dev->data->dev_private) {
			err = -ENOMEM;
			goto out_free;
		}

allocate_mac:
		pi = (struct port_info *)eth_dev->data->dev_private;
		adapter->port[i] = pi;
		pi->eth_dev = eth_dev;
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = port_id;
		pi->pidx = i;

		pi->eth_dev->device = &adapter->pdev->device;
		pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
		pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
		pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;

		rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
		pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
							   ETHER_ADDR_LEN, 0);
		if (!pi->eth_dev->data->mac_addrs) {
			dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
				__func__);
			err = -ENOMEM;
			goto out_free;
		}
	}

	if (adapter->flags & FW_OK) {
		err = t4vf_port_init(adapter);
		if (err) {
			dev_err(adapter, "%s: t4_port_init failed with err %d\n",
				__func__, err);
			goto out_free;
		}
	}

	cfg_queues(adapter->eth_dev);
	print_adapter_info(adapter);
	print_port_info(adapter);

	return 0;

out_free:
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		if (pi->viid != 0)
			t4_free_vi(adapter, adapter->mbox, adapter->pf,
				   0, pi->viid);
		/* Skip first port since it'll be de-allocated by DPDK */
		if (i == 0)
			continue;
		if (pi->eth_dev) {
			if (pi->eth_dev->data->dev_private)
				rte_free(pi->eth_dev->data->dev_private);
			rte_eth_dev_release_port(pi->eth_dev);
		}
	}
	return -err;
}