/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Gaëtan Rivet
 */

#include <rte_debug.h>

#include "rte_ethdev.h"
#include "rte_ethdev_trace_fp.h"
#include "ethdev_driver.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";

static const struct rte_memzone *eth_dev_shared_mz;
struct eth_dev_shared *eth_dev_shared_data;

/* spinlock for eth device callbacks */
rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

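/*
 * Translate an ethdev structure pointer back to its port identifier from its
 * position in the rte_eth_devices array; RTE_MAX_ETHPORTS (an invalid port id)
 * is returned for a NULL pointer.
 */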
uint16_t
eth_dev_to_id(const struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return RTE_MAX_ETHPORTS;
	return dev - rte_eth_devices;
}

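/*
 * Scan the rte_eth_devices array, starting just after 'start' (or from the
 * first entry when 'start' is NULL), and return the first device for which
 * the 'cmp' callback returns 0, or NULL when no device matches.
 */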
struct rte_eth_dev *
eth_find_device(const struct rte_eth_dev *start, rte_eth_cmp_t cmp,
		const void *data)
{
	struct rte_eth_dev *edev;
	ptrdiff_t idx;

	/* Avoid Undefined Behaviour */
	if (start != NULL &&
	    (start < &rte_eth_devices[0] ||
	     start > &rte_eth_devices[RTE_MAX_ETHPORTS]))
		return NULL;
	if (start != NULL)
		idx = eth_dev_to_id(start) + 1;
	else
		idx = 0;
	for (; idx < RTE_MAX_ETHPORTS; idx++) {
		edev = &rte_eth_devices[idx];
		if (cmp(edev, data) == 0)
			return edev;
	}
	return NULL;
}

/* Put a new value into the list, ignoring duplicates; return -1 if the list is full. */
static int
rte_eth_devargs_enlist(uint16_t *list, uint16_t *len_list,
		       const uint16_t max_list, uint16_t val)
{
	uint16_t i;

	for (i = 0; i < *len_list; i++) {
		if (list[i] == val)
			return 0;
	}
	if (*len_list >= max_list)
		return -1;
	list[(*len_list)++] = val;
	return 0;
}

/* Parse and enlist a range expression of "min-max" or a single value. */
static char *
rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
	const uint16_t max_list)
{
	uint16_t lo, hi, val;
	int result, n = 0;
	char *pos = str;

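	/*
	 * Example: for "3-5,7" the format below yields result == 2 with
	 * lo == 3, hi == 5 and n == 3, so str + n points at the ',' that
	 * follows the range; for "7]" it yields result == 1 and n == 1.
	 */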
	result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
	if (result == 1) {
		if (rte_eth_devargs_enlist(list, len_list, max_list, lo) != 0)
			return NULL;
	} else if (result == 2) {
		if (lo > hi)
			return NULL;
		for (val = lo; val <= hi; val++) {
			if (rte_eth_devargs_enlist(list, len_list, max_list,
						   val) != 0)
				return NULL;
		}
	} else
		return NULL;
	return pos + n;
}

/*
 * Parse a list of values separated by ",".
 * Each value can be a range "min-max" or a single number.
 * Examples:
 *  2               - single
 *  [1,2,3]         - single list
 *  [1,3-5,7,9-11]  - list with singles and ranges
 */
static char *
rte_eth_devargs_process_list(char *str, uint16_t *list, uint16_t *len_list,
	const uint16_t max_list)
{
	char *pos = str;

	if (*pos == '[')
		pos++;
	while (1) {
		pos = rte_eth_devargs_process_range(pos, list, len_list,
						    max_list);
		if (pos == NULL)
			return NULL;
		if (*pos != ',') /* end of list */
			break;
		pos++;
	}
	if (*str == '[' && *pos != ']')
		return NULL;
	if (*pos == ']')
		pos++;
	return pos;
}

/*
 * Parse representor ports from a single value or lists.
 *
 * Representor format:
 *   #: range or single number of VF representor - legacy
 *   [[c#]pf#]vf#: VF port representor/s
 *   [[c#]pf#]sf#: SF port representor/s
 *   [c#]pf#:      PF port representor/s
 *
 * Examples of #:
 *  2               - single
 *  [1,2,3]         - single list
 *  [1,3-5,7,9-11]  - list with singles and ranges
 */
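/*
 * For example, "vf3", "vf[0-3]", "pf1vf[1,3-5]" and "c0pf0sf[0-7]" all match
 * the format above and are accepted by the parser below.
 */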
int
rte_eth_devargs_parse_representor_ports(char *str, void *data)
{
	struct rte_eth_devargs *eth_da = data;
	const char *input = str; /* keep the original string for error reporting */

	if (str[0] == 'c') {
		str += 1;
		str = rte_eth_devargs_process_list(str, eth_da->mh_controllers,
				&eth_da->nb_mh_controllers,
				RTE_DIM(eth_da->mh_controllers));
		if (str == NULL)
			goto done;
	}
	if (str[0] == 'p' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_PF;
		str += 2;
		str = rte_eth_devargs_process_list(str, eth_da->ports,
				&eth_da->nb_ports, RTE_DIM(eth_da->ports));
		if (str == NULL || str[0] == '\0')
			goto done;
	} else if (eth_da->nb_mh_controllers > 0) {
		/* 'c' must be followed by 'pf'. */
		str = NULL;
		goto done;
	}
	if (str[0] == 'v' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_VF;
		str += 2;
	} else if (str[0] == 's' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_SF;
		str += 2;
	} else {
		/* 'pf' must be followed by 'vf' or 'sf'. */
		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
			str = NULL;
			goto done;
		}
		eth_da->type = RTE_ETH_REPRESENTOR_VF;
	}
	str = rte_eth_devargs_process_list(str, eth_da->representor_ports,
		&eth_da->nb_representor_ports,
		RTE_DIM(eth_da->representor_ports));
done:
	if (str == NULL)
		RTE_ETHDEV_LOG_LINE(ERR, "wrong representor format: %s", input);
	return str == NULL ? -1 : 0;
}

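/*
 * Dummy Rx/Tx burst functions and the per-port queue state backing them.
 * eth_dev_fp_ops_reset() installs them so that calling a burst function on a
 * port that is not ready logs a warning once per port and direction, dumps
 * the stack, sets rte_errno to ENOTSUP and returns zero packets.
 */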
struct dummy_queue {
	bool rx_warn_once;
	bool tx_warn_once;
};
static struct dummy_queue *dummy_queues_array[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct dummy_queue per_port_queues[RTE_MAX_ETHPORTS];
RTE_INIT(dummy_queue_init)
{
	uint16_t port_id;

	for (port_id = 0; port_id < RTE_DIM(per_port_queues); port_id++) {
		unsigned int q;

		for (q = 0; q < RTE_DIM(dummy_queues_array[port_id]); q++)
			dummy_queues_array[port_id][q] = &per_port_queues[port_id];
	}
}

static uint16_t
dummy_eth_rx_burst(void *rxq,
		__rte_unused struct rte_mbuf **rx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	struct dummy_queue *queue = rxq;
	uintptr_t port_id;

	port_id = queue - per_port_queues;
	if (port_id < RTE_DIM(per_port_queues) && !queue->rx_warn_once) {
		RTE_ETHDEV_LOG_LINE(ERR, "lcore %u called rx_pkt_burst for not ready port %"PRIuPTR,
			rte_lcore_id(), port_id);
		rte_dump_stack();
		queue->rx_warn_once = true;
	}
	rte_errno = ENOTSUP;
	return 0;
}

static uint16_t
dummy_eth_tx_burst(void *txq,
		__rte_unused struct rte_mbuf **tx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	struct dummy_queue *queue = txq;
	uintptr_t port_id;

	port_id = queue - per_port_queues;
	if (port_id < RTE_DIM(per_port_queues) && !queue->tx_warn_once) {
		RTE_ETHDEV_LOG_LINE(ERR, "lcore %u called tx_pkt_burst for not ready port %"PRIuPTR,
			rte_lcore_id(), port_id);
		rte_dump_stack();
		queue->tx_warn_once = true;
	}
	rte_errno = ENOTSUP;
	return 0;
}

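/*
 * Point the fast-path ops of a port back at the dummy burst functions and
 * dummy queue arrays, and re-arm the "not ready" warnings for that port.
 */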
void
eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
{
	static RTE_ATOMIC(void *) dummy_data[RTE_MAX_QUEUES_PER_PORT];
	uintptr_t port_id = fpo - rte_eth_fp_ops;

	per_port_queues[port_id].rx_warn_once = false;
	per_port_queues[port_id].tx_warn_once = false;
	*fpo = (struct rte_eth_fp_ops) {
		.rx_pkt_burst = dummy_eth_rx_burst,
		.tx_pkt_burst = dummy_eth_tx_burst,
		.rxq = {
			.data = (void **)&dummy_queues_array[port_id],
			.clbk = dummy_data,
		},
		.txq = {
			.data = (void **)&dummy_queues_array[port_id],
			.clbk = dummy_data,
		},
	};
}

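/*
 * Copy the burst, descriptor status and mbuf recycling callbacks of a
 * configured device, together with its queue and callback arrays, into the
 * fast-path ops entry of the port.
 */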
void
eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
		const struct rte_eth_dev *dev)
{
	fpo->rx_pkt_burst = dev->rx_pkt_burst;
	fpo->tx_pkt_burst = dev->tx_pkt_burst;
	fpo->tx_pkt_prepare = dev->tx_pkt_prepare;
	fpo->rx_queue_count = dev->rx_queue_count;
	fpo->rx_descriptor_status = dev->rx_descriptor_status;
	fpo->tx_queue_count = dev->tx_queue_count;
	fpo->tx_descriptor_status = dev->tx_descriptor_status;
	fpo->recycle_tx_mbufs_reuse = dev->recycle_tx_mbufs_reuse;
	fpo->recycle_rx_descriptors_refill = dev->recycle_rx_descriptors_refill;

	fpo->rxq.data = dev->data->rx_queues;
	fpo->rxq.clbk = (void * __rte_atomic *)(uintptr_t)dev->post_rx_burst_cbs;

	fpo->txq.data = dev->data->tx_queues;
	fpo->txq.clbk = (void * __rte_atomic *)(uintptr_t)dev->pre_tx_burst_cbs;
}

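/*
 * Run the chain of post-Rx callbacks registered on a queue; each callback
 * can modify the packet array and returns the updated packet count.
 */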
uint16_t
rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
	void *opaque)
{
	const struct rte_eth_rxtx_callback *cb = opaque;

	while (cb != NULL) {
		nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
				nb_pkts, cb->param);
		cb = cb->next;
	}

	if (unlikely(nb_rx))
		rte_eth_trace_call_rx_callbacks_nonempty(port_id, queue_id, (void **)rx_pkts,
						nb_rx, nb_pkts);
	else
		rte_eth_trace_call_rx_callbacks_empty(port_id, queue_id, (void **)rx_pkts,
						nb_pkts);

	return nb_rx;
}

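/*
 * Run the chain of pre-Tx callbacks registered on a queue before the packets
 * are handed to the driver transmit function; the possibly reduced packet
 * count is returned.
 */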
uint16_t
rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque)
{
	const struct rte_eth_rxtx_callback *cb = opaque;

	while (cb != NULL) {
		nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
				cb->param);
		cb = cb->next;
	}

	rte_eth_trace_call_tx_callbacks(port_id, queue_id, (void **)tx_pkts,
					nb_pkts);

	return nb_pkts;
}

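/*
 * Return a valid pointer to the ethdev data shared between processes:
 * the primary process reserves and initialises the backing memzone on first
 * use, while secondary processes look it up and refresh their pointers if it
 * moved.  NULL is returned on failure.
 */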
void *
eth_dev_shared_data_prepare(void)
{
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		const unsigned int flags = 0;

		if (eth_dev_shared_mz != NULL)
			goto out;

		/* Allocate port data and ownership shared memory. */
		mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
				sizeof(*eth_dev_shared_data),
				rte_socket_id(), flags);
		if (mz == NULL) {
			RTE_ETHDEV_LOG_LINE(ERR, "Cannot allocate ethdev shared data");
			goto out;
		}

		eth_dev_shared_mz = mz;
		eth_dev_shared_data = mz->addr;
		eth_dev_shared_data->allocated_owners = 0;
		eth_dev_shared_data->next_owner_id =
			RTE_ETH_DEV_NO_OWNER + 1;
		eth_dev_shared_data->allocated_ports = 0;
		memset(eth_dev_shared_data->data, 0,
		       sizeof(eth_dev_shared_data->data));
	} else {
		mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL) {
			/* Clean up any remaining traces of a previous shared memory. */
			eth_dev_shared_mz = NULL;
			eth_dev_shared_data = NULL;
			RTE_ETHDEV_LOG_LINE(ERR, "Cannot lookup ethdev shared data");
			goto out;
		}
		if (mz == eth_dev_shared_mz && mz->addr == eth_dev_shared_data)
			goto out;

		/* Shared memory changed in the primary process, refresh the pointers. */
		eth_dev_shared_mz = mz;
		eth_dev_shared_data = mz->addr;
	}
out:
	return eth_dev_shared_data;
}

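/*
 * Free the shared memzone from the primary process once the last port and
 * the last owner have been released.
 */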
void
eth_dev_shared_data_release(void)
{
	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);

	if (eth_dev_shared_data->allocated_ports != 0)
		return;
	if (eth_dev_shared_data->allocated_owners != 0)
		return;

	rte_memzone_free(eth_dev_shared_mz);
	eth_dev_shared_mz = NULL;
	eth_dev_shared_data = NULL;
}

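/* Release one Rx queue of a device through the driver callback, if any. */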
void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

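/* Release one Tx queue of a device through the driver callback, if any. */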
void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

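/*
 * Resize the Rx queue array of a device: allocate an array of
 * RTE_MAX_QUEUES_PER_PORT queue pointers on first configuration, release the
 * queues above the new count when reconfiguring, and free the whole array
 * when the port is reconfigured with zero queues.
 */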
int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned int i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

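/* Same as eth_dev_rx_queue_config(), but for the Tx queue array. */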
int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned int i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
477