xref: /dpdk/lib/ethdev/ethdev_private.c (revision 665b49c51639a10c553433bc2bcd85c7331c631e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Gaëtan Rivet
 */

#include <rte_debug.h>

#include "rte_ethdev.h"
#include "rte_ethdev_trace_fp.h"
#include "ethdev_driver.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";

/* Shared memory between primary and secondary processes. */
struct eth_dev_shared *eth_dev_shared_data;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for eth device callbacks */
rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

uint16_t
eth_dev_to_id(const struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return RTE_MAX_ETHPORTS;
	return dev - rte_eth_devices;
}

struct rte_eth_dev *
eth_find_device(const struct rte_eth_dev *start, rte_eth_cmp_t cmp,
		const void *data)
{
	struct rte_eth_dev *edev;
	ptrdiff_t idx;

	/* Avoid Undefined Behaviour */
	if (start != NULL &&
	    (start < &rte_eth_devices[0] ||
	     start > &rte_eth_devices[RTE_MAX_ETHPORTS]))
		return NULL;
	if (start != NULL)
		idx = eth_dev_to_id(start) + 1;
	else
		idx = 0;
	for (; idx < RTE_MAX_ETHPORTS; idx++) {
		edev = &rte_eth_devices[idx];
		if (cmp(edev, data) == 0)
			return edev;
	}
	return NULL;
}

/* Put new value into list. */
static int
rte_eth_devargs_enlist(uint16_t *list, uint16_t *len_list,
		       const uint16_t max_list, uint16_t val)
{
	uint16_t i;

	for (i = 0; i < *len_list; i++) {
		if (list[i] == val)
			return 0;
	}
	if (*len_list >= max_list)
		return -1;
	list[(*len_list)++] = val;
	return 0;
}

/* Parse and enlist a range expression of "min-max" or a single value. */
static char *
rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
	const uint16_t max_list)
{
	uint16_t lo, hi, val;
	int result, n = 0;
	char *pos = str;

	result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
	if (result == 1) {
		if (rte_eth_devargs_enlist(list, len_list, max_list, lo) != 0)
			return NULL;
	} else if (result == 2) {
		if (lo > hi)
			return NULL;
		for (val = lo; val <= hi; val++) {
			if (rte_eth_devargs_enlist(list, len_list, max_list,
						   val) != 0)
				return NULL;
		}
	} else
		return NULL;
	return pos + n;
}
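
/*
 * Worked example of the sscanf() call above (the input strings are purely
 * illustrative): the two "%n" conversions record how many characters were
 * consumed, so the caller can continue right after the range.
 *
 *   sscanf("3-5,7", "%hu%n-%hu%n", &lo, &n, &hi, &n);
 *     -> result == 2, lo == 3, hi == 5, n == 3; values 3, 4 and 5 are
 *        enlisted and "pos + n" points at ",7".
 *   sscanf("7", "%hu%n-%hu%n", &lo, &n, &hi, &n);
 *     -> result == 1, n == 1; only 7 is enlisted.
 */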

/*
 * Parse list of values separated by ",".
 * Each value can be a range "min-max" or a single number.
 * Examples:
 *  2               - single
 *  [1,2,3]         - single list
 *  [1,3-5,7,9-11]  - list with singles and ranges
 */
static char *
rte_eth_devargs_process_list(char *str, uint16_t *list, uint16_t *len_list,
	const uint16_t max_list)
{
	char *pos = str;

	if (*pos == '[')
		pos++;
	while (1) {
		pos = rte_eth_devargs_process_range(pos, list, len_list,
						    max_list);
		if (pos == NULL)
			return NULL;
		if (*pos != ',') /* end of list */
			break;
		pos++;
	}
	if (*str == '[' && *pos != ']')
		return NULL;
	if (*pos == ']')
		pos++;
	return pos;
}
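
/*
 * For example, given the hypothetical fragment "[1,3-5]pf0", the loop above
 * consumes "[1,3-5]" as follows:
 *   - '[' is skipped;
 *   - "1" enlists 1 and parsing resumes at the ',';
 *   - "3-5" enlists 3, 4 and 5 and parsing resumes at the ']';
 *   - ']' closes the list and the returned pointer refers to "pf0".
 * The resulting list is {1, 3, 4, 5} with *len_list == 4.
 */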

/*
 * Parse representor ports from a single value or lists.
 *
 * Representor format:
 *   #: range or single number of VF representor - legacy
 *   [[c#]pf#]vf#: VF port representor/s
 *   [[c#]pf#]sf#: SF port representor/s
 *   [c#]pf#:      PF port representor/s
 *
 * Examples of #:
 *  2               - single
 *  [1,2,3]         - single list
 *  [1,3-5,7,9-11]  - list with singles and ranges
 */
int
rte_eth_devargs_parse_representor_ports(char *str, void *data)
{
	struct rte_eth_devargs *eth_da = data;

	if (str[0] == 'c') {
		str += 1;
		str = rte_eth_devargs_process_list(str, eth_da->mh_controllers,
				&eth_da->nb_mh_controllers,
				RTE_DIM(eth_da->mh_controllers));
		if (str == NULL)
			goto done;
	}
	if (str[0] == 'p' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_PF;
		str += 2;
		str = rte_eth_devargs_process_list(str, eth_da->ports,
				&eth_da->nb_ports, RTE_DIM(eth_da->ports));
		if (str == NULL || str[0] == '\0')
			goto done;
	} else if (eth_da->nb_mh_controllers > 0) {
		/* 'c' must be followed by 'pf'. */
		str = NULL;
		goto done;
	}
	if (str[0] == 'v' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_VF;
		str += 2;
	} else if (str[0] == 's' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_SF;
		str += 2;
	} else {
		/* 'pf' must be followed by 'vf' or 'sf'. */
		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
			str = NULL;
			goto done;
		}
		eth_da->type = RTE_ETH_REPRESENTOR_VF;
	}
	str = rte_eth_devargs_process_list(str, eth_da->representor_ports,
		&eth_da->nb_representor_ports,
		RTE_DIM(eth_da->representor_ports));
done:
	if (str == NULL)
		RTE_LOG(ERR, EAL, "wrong representor format\n");
	return str == NULL ? -1 : 0;
}
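
/*
 * For instance, parsing the representor devargs value "c0pf0vf[0-3]" fills
 * the rte_eth_devargs structure roughly as follows:
 *   mh_controllers    = {0}           nb_mh_controllers    = 1
 *   ports             = {0}           nb_ports             = 1
 *   representor_ports = {0, 1, 2, 3}  nb_representor_ports = 4
 *   type              = RTE_ETH_REPRESENTOR_VF
 * The legacy form "2" (or "[1,2,3]") skips the controller and PF stages and
 * only fills representor_ports, with type defaulting to
 * RTE_ETH_REPRESENTOR_VF.
 */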

struct dummy_queue {
	bool rx_warn_once;
	bool tx_warn_once;
};
static struct dummy_queue *dummy_queues_array[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct dummy_queue per_port_queues[RTE_MAX_ETHPORTS];
RTE_INIT(dummy_queue_init)
{
	uint16_t port_id;

	for (port_id = 0; port_id < RTE_DIM(per_port_queues); port_id++) {
		unsigned int q;

		for (q = 0; q < RTE_DIM(dummy_queues_array[port_id]); q++)
			dummy_queues_array[port_id][q] = &per_port_queues[port_id];
	}
}

static uint16_t
dummy_eth_rx_burst(void *rxq,
		__rte_unused struct rte_mbuf **rx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	struct dummy_queue *queue = rxq;
	uintptr_t port_id;

	port_id = queue - per_port_queues;
	if (port_id < RTE_DIM(per_port_queues) && !queue->rx_warn_once) {
		RTE_ETHDEV_LOG(ERR, "lcore %u called rx_pkt_burst for not ready port %"PRIuPTR"\n",
			rte_lcore_id(), port_id);
		rte_dump_stack();
		queue->rx_warn_once = true;
	}
	rte_errno = ENOTSUP;
	return 0;
}

static uint16_t
dummy_eth_tx_burst(void *txq,
		__rte_unused struct rte_mbuf **tx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	struct dummy_queue *queue = txq;
	uintptr_t port_id;

	port_id = queue - per_port_queues;
	if (port_id < RTE_DIM(per_port_queues) && !queue->tx_warn_once) {
		RTE_ETHDEV_LOG(ERR, "lcore %u called tx_pkt_burst for not ready port %"PRIuPTR"\n",
			rte_lcore_id(), port_id);
		rte_dump_stack();
		queue->tx_warn_once = true;
	}
	rte_errno = ENOTSUP;
	return 0;
}

void
eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
{
	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
	uintptr_t port_id = fpo - rte_eth_fp_ops;

	per_port_queues[port_id].rx_warn_once = false;
	per_port_queues[port_id].tx_warn_once = false;
	*fpo = (struct rte_eth_fp_ops) {
		.rx_pkt_burst = dummy_eth_rx_burst,
		.tx_pkt_burst = dummy_eth_tx_burst,
		.rxq = {
			.data = (void **)&dummy_queues_array[port_id],
			.clbk = dummy_data,
		},
		.txq = {
			.data = (void **)&dummy_queues_array[port_id],
			.clbk = dummy_data,
		},
	};
}

void
eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
		const struct rte_eth_dev *dev)
{
	fpo->rx_pkt_burst = dev->rx_pkt_burst;
	fpo->tx_pkt_burst = dev->tx_pkt_burst;
	fpo->tx_pkt_prepare = dev->tx_pkt_prepare;
	fpo->rx_queue_count = dev->rx_queue_count;
	fpo->rx_descriptor_status = dev->rx_descriptor_status;
	fpo->tx_descriptor_status = dev->tx_descriptor_status;

	fpo->rxq.data = dev->data->rx_queues;
	fpo->rxq.clbk = (void **)(uintptr_t)dev->post_rx_burst_cbs;

	fpo->txq.data = dev->data->tx_queues;
	fpo->txq.clbk = (void **)(uintptr_t)dev->pre_tx_burst_cbs;
}
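
/*
 * The ops set up above (or reset to the dummy handlers) are what the inline
 * rte_eth_rx_burst()/rte_eth_tx_burst() wrappers dispatch through; a
 * simplified sketch of the RX side:
 *
 *   struct rte_eth_fp_ops *p = &rte_eth_fp_ops[port_id];
 *   void *qd = p->rxq.data[queue_id];
 *   uint16_t nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
 *
 * After eth_dev_fp_ops_reset(), qd points at the per-port dummy queue, so
 * the call lands in dummy_eth_rx_burst() and fails with rte_errno == ENOTSUP.
 */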

uint16_t
rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
	void *opaque)
{
	const struct rte_eth_rxtx_callback *cb = opaque;

	while (cb != NULL) {
		nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
				nb_pkts, cb->param);
		cb = cb->next;
	}

	rte_eth_trace_call_rx_callbacks(port_id, queue_id, (void **)rx_pkts,
					nb_rx, nb_pkts);

	return nb_rx;
}

uint16_t
rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque)
{
	const struct rte_eth_rxtx_callback *cb = opaque;

	while (cb != NULL) {
		nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
				cb->param);
		cb = cb->next;
	}

	rte_eth_trace_call_tx_callbacks(port_id, queue_id, (void **)tx_pkts,
					nb_pkts);

	return nb_pkts;
}
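
/*
 * These helpers walk the callback chains installed with
 * rte_eth_add_rx_callback()/rte_eth_add_tx_callback(). A minimal sketch of
 * an RX callback that only counts received packets (rx_count_cb and
 * pkt_count are illustrative names):
 *
 *   static uint16_t
 *   rx_count_cb(uint16_t port_id, uint16_t queue_id,
 *               struct rte_mbuf *pkts[], uint16_t nb_rx,
 *               uint16_t max_pkts, void *user_param)
 *   {
 *       uint64_t *pkt_count = user_param;
 *
 *       RTE_SET_USED(port_id);
 *       RTE_SET_USED(queue_id);
 *       RTE_SET_USED(pkts);
 *       RTE_SET_USED(max_pkts);
 *       *pkt_count += nb_rx;
 *       return nb_rx;
 *   }
 *
 *   cb = rte_eth_add_rx_callback(port_id, queue_id, rx_count_cb, &pkt_count);
 */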

void
eth_dev_shared_data_prepare(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned int i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned int i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}
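
/*
 * Both queue-config helpers above are driven by rte_eth_dev_configure();
 * applications do not call them directly. A rough application-side sketch
 * (port_conf is an illustrative, zero-initialised rte_eth_conf):
 *
 *   struct rte_eth_conf port_conf = {0};
 *
 *   if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf) < 0)
 *       rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 *
 * Reconfiguring with fewer queues releases the trailing queues through
 * eth_dev_rxq_release()/eth_dev_txq_release(); reconfiguring with zero
 * queues also frees the queue pointer arrays.
 */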