xref: /dpdk/lib/ethdev/ethdev_private.c (revision f4eac3a09c51a1a2dab1f2fd3a10fe0619286a0d)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Gaëtan Rivet
 */

#include <rte_debug.h>

#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";

/* Shared memory between primary and secondary processes. */
struct eth_dev_shared *eth_dev_shared_data;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for eth device callbacks */
rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

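/* Convert a device pointer into its port identifier,
 * or RTE_MAX_ETHPORTS if the pointer is NULL.
 */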
uint16_t
eth_dev_to_id(const struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return RTE_MAX_ETHPORTS;
	return dev - rte_eth_devices;
}

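/* Return the next device after 'start' (or the first one when 'start' is
 * NULL) for which 'cmp' reports a match on 'data', or NULL if none is found.
 */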
struct rte_eth_dev *
eth_find_device(const struct rte_eth_dev *start, rte_eth_cmp_t cmp,
		const void *data)
{
	struct rte_eth_dev *edev;
	ptrdiff_t idx;

	/* Avoid Undefined Behaviour */
	if (start != NULL &&
	    (start < &rte_eth_devices[0] ||
	     start > &rte_eth_devices[RTE_MAX_ETHPORTS]))
		return NULL;
	if (start != NULL)
		idx = eth_dev_to_id(start) + 1;
	else
		idx = 0;
	for (; idx < RTE_MAX_ETHPORTS; idx++) {
		edev = &rte_eth_devices[idx];
		if (cmp(edev, data) == 0)
			return edev;
	}
	return NULL;
}

/* Put a new value into the list, ignoring duplicates. */
static int
rte_eth_devargs_enlist(uint16_t *list, uint16_t *len_list,
		       const uint16_t max_list, uint16_t val)
{
	uint16_t i;

	for (i = 0; i < *len_list; i++) {
		if (list[i] == val)
			return 0;
	}
	if (*len_list >= max_list)
		return -1;
	list[(*len_list)++] = val;
	return 0;
}

/* Parse and enlist a range expression of "min-max" or a single value. */
static char *
rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
	const uint16_t max_list)
{
	uint16_t lo, hi, val;
	int result, n = 0;
	char *pos = str;

	result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
	if (result == 1) {
		if (rte_eth_devargs_enlist(list, len_list, max_list, lo) != 0)
			return NULL;
	} else if (result == 2) {
		if (lo > hi)
			return NULL;
		for (val = lo; val <= hi; val++) {
			if (rte_eth_devargs_enlist(list, len_list, max_list,
						   val) != 0)
				return NULL;
		}
	} else
		return NULL;
	return pos + n;
}

/*
 * Parse a list of values separated by ",".
 * Each value can be a single number or a "min-max" range.
 * Examples:
 *  2               - single
 *  [1,2,3]         - single list
 *  [1,3-5,7,9-11]  - list with singles and ranges
 */
static char *
rte_eth_devargs_process_list(char *str, uint16_t *list, uint16_t *len_list,
	const uint16_t max_list)
{
	char *pos = str;

	if (*pos == '[')
		pos++;
	while (1) {
		pos = rte_eth_devargs_process_range(pos, list, len_list,
						    max_list);
		if (pos == NULL)
			return NULL;
		if (*pos != ',') /* end of list */
			break;
		pos++;
	}
	if (*str == '[' && *pos != ']')
		return NULL;
	if (*pos == ']')
		pos++;
	return pos;
}

/*
 * Parse representor ports from a single value or lists.
 *
 * Representor format:
 *   #: range or single number of VF representor - legacy
 *   [[c#]pf#]vf#: VF port representor/s
 *   [[c#]pf#]sf#: SF port representor/s
 *   [c#]pf#:      PF port representor/s
 *
 * Examples of #:
 *  2               - single
 *  [1,2,3]         - single list
 *  [1,3-5,7,9-11]  - list with singles and ranges
 */
int
rte_eth_devargs_parse_representor_ports(char *str, void *data)
{
	struct rte_eth_devargs *eth_da = data;

	if (str[0] == 'c') {
		str += 1;
		str = rte_eth_devargs_process_list(str, eth_da->mh_controllers,
				&eth_da->nb_mh_controllers,
				RTE_DIM(eth_da->mh_controllers));
		if (str == NULL)
			goto done;
	}
	if (str[0] == 'p' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_PF;
		str += 2;
		str = rte_eth_devargs_process_list(str, eth_da->ports,
				&eth_da->nb_ports, RTE_DIM(eth_da->ports));
		if (str == NULL || str[0] == '\0')
			goto done;
	} else if (eth_da->nb_mh_controllers > 0) {
		/* 'c' must be followed by 'pf'. */
		str = NULL;
		goto done;
	}
	if (str[0] == 'v' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_VF;
		str += 2;
	} else if (str[0] == 's' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_SF;
		str += 2;
	} else {
		/* 'pf' must be followed by 'vf' or 'sf'. */
		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
			str = NULL;
			goto done;
		}
		eth_da->type = RTE_ETH_REPRESENTOR_VF;
	}
	str = rte_eth_devargs_process_list(str, eth_da->representor_ports,
		&eth_da->nb_representor_ports,
		RTE_DIM(eth_da->representor_ports));
done:
	if (str == NULL)
		RTE_LOG(ERR, EAL, "wrong representor format\n");
	return str == NULL ? -1 : 0;
}

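/* Placeholder queues installed by eth_dev_fp_ops_reset() for ports that are
 * not ready; each one remembers whether a warning was already logged.
 */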
struct dummy_queue {
	bool rx_warn_once;
	bool tx_warn_once;
};
static struct dummy_queue *dummy_queues_array[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct dummy_queue per_port_queues[RTE_MAX_ETHPORTS];
RTE_INIT(dummy_queue_init)
{
	uint16_t port_id;

	for (port_id = 0; port_id < RTE_DIM(per_port_queues); port_id++) {
		unsigned int q;

		for (q = 0; q < RTE_DIM(dummy_queues_array[port_id]); q++)
			dummy_queues_array[port_id][q] = &per_port_queues[port_id];
	}
}

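/* Rx burst stub: log once per port, set rte_errno and receive nothing. */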
static uint16_t
dummy_eth_rx_burst(void *rxq,
		__rte_unused struct rte_mbuf **rx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	struct dummy_queue *queue = rxq;
	uintptr_t port_id;

	port_id = queue - per_port_queues;
	if (port_id < RTE_DIM(per_port_queues) && !queue->rx_warn_once) {
		RTE_ETHDEV_LOG(ERR, "lcore %u called rx_pkt_burst for not ready port %"PRIuPTR"\n",
			rte_lcore_id(), port_id);
		rte_dump_stack();
		queue->rx_warn_once = true;
	}
	rte_errno = ENOTSUP;
	return 0;
}

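/* Tx burst stub: log once per port, set rte_errno and send nothing. */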
static uint16_t
dummy_eth_tx_burst(void *txq,
		__rte_unused struct rte_mbuf **tx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	struct dummy_queue *queue = txq;
	uintptr_t port_id;

	port_id = queue - per_port_queues;
	if (port_id < RTE_DIM(per_port_queues) && !queue->tx_warn_once) {
		RTE_ETHDEV_LOG(ERR, "lcore %u called tx_pkt_burst for not ready port %"PRIuPTR"\n",
			rte_lcore_id(), port_id);
		rte_dump_stack();
		queue->tx_warn_once = true;
	}
	rte_errno = ENOTSUP;
	return 0;
}

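/* Point the fast-path ops of a port back to the dummy burst functions and
 * dummy queue arrays, and rearm the one-shot warnings.
 */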
void
eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
{
	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
	uintptr_t port_id = fpo - rte_eth_fp_ops;

	per_port_queues[port_id].rx_warn_once = false;
	per_port_queues[port_id].tx_warn_once = false;
	*fpo = (struct rte_eth_fp_ops) {
		.rx_pkt_burst = dummy_eth_rx_burst,
		.tx_pkt_burst = dummy_eth_tx_burst,
		.rxq = {
			.data = (void **)&dummy_queues_array[port_id],
			.clbk = dummy_data,
		},
		.txq = {
			.data = (void **)&dummy_queues_array[port_id],
			.clbk = dummy_data,
		},
	};
}

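/* Fill the fast-path ops of a port from the driver callbacks and queue
 * arrays of the underlying device.
 */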
void
eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
		const struct rte_eth_dev *dev)
{
	fpo->rx_pkt_burst = dev->rx_pkt_burst;
	fpo->tx_pkt_burst = dev->tx_pkt_burst;
	fpo->tx_pkt_prepare = dev->tx_pkt_prepare;
	fpo->rx_queue_count = dev->rx_queue_count;
	fpo->rx_descriptor_status = dev->rx_descriptor_status;
	fpo->tx_descriptor_status = dev->tx_descriptor_status;

	fpo->rxq.data = dev->data->rx_queues;
	fpo->rxq.clbk = (void **)(uintptr_t)dev->post_rx_burst_cbs;

	fpo->txq.data = dev->data->tx_queues;
	fpo->txq.clbk = (void **)(uintptr_t)dev->pre_tx_burst_cbs;
}

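/* Run the chain of user-supplied Rx callbacks on a received burst and return
 * the resulting number of packets.
 */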
uint16_t
rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
	void *opaque)
{
	const struct rte_eth_rxtx_callback *cb = opaque;

	while (cb != NULL) {
		nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
				nb_pkts, cb->param);
		cb = cb->next;
	}

	return nb_rx;
}

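/* Run the chain of user-supplied Tx callbacks on a burst about to be sent
 * and return the resulting number of packets.
 */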
uint16_t
rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque)
{
	const struct rte_eth_rxtx_callback *cb = opaque;

	while (cb != NULL) {
		nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
				cb->param);
		cb = cb->next;
	}

	return nb_pkts;
}

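/* Reserve (primary process) or look up (secondary process) the memzone
 * holding the ethdev data shared between processes.
 */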
void
eth_dev_shared_data_prepare(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

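/* Release one Rx queue through the driver callback and clear its pointer. */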
void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

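/* Release one Tx queue through the driver callback and clear its pointer. */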
void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

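/* Size the Rx queue array for nb_queues entries: allocate it on first
 * configuration, release excess queues on reconfiguration, free it when
 * nb_queues is 0.
 */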
int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned int i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

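/* Size the Tx queue array for nb_queues entries: allocate it on first
 * configuration, release excess queues on reconfiguration, free it when
 * nb_queues is 0.
 */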
int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned int i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}