xref: /dpdk/lib/ethdev/ethdev_private.c (revision 5d52418fa4b9a7f28eaedc1d88ec5cf330381c0e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Gaëtan Rivet
 */

#include <rte_debug.h>

#include "rte_ethdev.h"
#include "rte_ethdev_trace_fp.h"
#include "ethdev_driver.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";

/* Shared memory between primary and secondary processes. */
struct eth_dev_shared *eth_dev_shared_data;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for eth device callbacks */
rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

uint16_t
eth_dev_to_id(const struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return RTE_MAX_ETHPORTS;
	return dev - rte_eth_devices;
}

struct rte_eth_dev *
eth_find_device(const struct rte_eth_dev *start, rte_eth_cmp_t cmp,
		const void *data)
{
	struct rte_eth_dev *edev;
	ptrdiff_t idx;

	/* Avoid Undefined Behaviour */
	if (start != NULL &&
	    (start < &rte_eth_devices[0] ||
	     start > &rte_eth_devices[RTE_MAX_ETHPORTS]))
		return NULL;
	if (start != NULL)
		idx = eth_dev_to_id(start) + 1;
	else
		idx = 0;
	for (; idx < RTE_MAX_ETHPORTS; idx++) {
		edev = &rte_eth_devices[idx];
		if (cmp(edev, data) == 0)
			return edev;
	}
	return NULL;
}
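
/*
 * Illustrative sketch (not part of the original file): eth_find_device()
 * walks rte_eth_devices[] with a caller-supplied comparator that returns 0
 * on a match. A hypothetical comparator matching on device name could look
 * like this:
 *
 *	static int
 *	cmp_dev_name(const struct rte_eth_dev *dev, const void *name)
 *	{
 *		if (dev->state == RTE_ETH_DEV_UNUSED)
 *			return -1;
 *		return strcmp(dev->data->name, name);
 *	}
 *
 *	dev = eth_find_device(NULL, cmp_dev_name, "net_tap0");
 *
 * Passing NULL as "start" scans from port 0; passing a previous match
 * resumes the scan just after it.
 */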

/* Put new value into list. */
static int
rte_eth_devargs_enlist(uint16_t *list, uint16_t *len_list,
		       const uint16_t max_list, uint16_t val)
{
	uint16_t i;

	for (i = 0; i < *len_list; i++) {
		if (list[i] == val)
			return 0;
	}
	if (*len_list >= max_list)
		return -1;
	list[(*len_list)++] = val;
	return 0;
}

/* Parse and enlist a range expression of "min-max" or a single value. */
static char *
rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
	const uint16_t max_list)
{
	uint16_t lo, hi, val;
	int result, n = 0;
	char *pos = str;

	result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
	if (result == 1) {
		if (rte_eth_devargs_enlist(list, len_list, max_list, lo) != 0)
			return NULL;
	} else if (result == 2) {
		if (lo > hi)
			return NULL;
		for (val = lo; val <= hi; val++) {
			if (rte_eth_devargs_enlist(list, len_list, max_list,
						   val) != 0)
				return NULL;
		}
	} else
		return NULL;
	return pos + n;
}
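
/*
 * Worked example (illustrative, not part of the original file): given
 * str = "3-5,7", sscanf() matches two values, so lo = 3, hi = 5, and the
 * second %n leaves n at the offset just past "5". Values 3, 4 and 5 are
 * enlisted and the returned pointer points at the ',' so the caller can
 * continue parsing. For str = "7" only one value matches, 7 is enlisted
 * and the returned pointer is the terminating '\0'.
 */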

/*
 * Parse a list of values separated by ",".
 * Each value can be a range "min-max" or a single number.
 * Examples:
 *  2               - single
 *  [1,2,3]         - single list
 *  [1,3-5,7,9-11]  - list with singles and ranges
 */
static char *
rte_eth_devargs_process_list(char *str, uint16_t *list, uint16_t *len_list,
	const uint16_t max_list)
{
	char *pos = str;

	if (*pos == '[')
		pos++;
	while (1) {
		pos = rte_eth_devargs_process_range(pos, list, len_list,
						    max_list);
		if (pos == NULL)
			return NULL;
		if (*pos != ',') /* end of list */
			break;
		pos++;
	}
	if (*str == '[' && *pos != ']')
		return NULL;
	if (*pos == ']')
		pos++;
	return pos;
}

/*
 * Parse representor ports from a single value or lists.
 *
 * Representor format:
 *   #: range or single number of VF representor - legacy
 *   [[c#]pf#]vf#: VF port representor/s
 *   [[c#]pf#]sf#: SF port representor/s
 *   [c#]pf#:      PF port representor/s
 *
 * Examples of #:
 *  2               - single
 *  [1,2,3]         - single list
 *  [1,3-5,7,9-11]  - list with singles and ranges
 */
int
rte_eth_devargs_parse_representor_ports(char *str, void *data)
{
	struct rte_eth_devargs *eth_da = data;
	const char *str_org = str;	/* kept for the error message below */

	if (str[0] == 'c') {
		str += 1;
		str = rte_eth_devargs_process_list(str, eth_da->mh_controllers,
				&eth_da->nb_mh_controllers,
				RTE_DIM(eth_da->mh_controllers));
		if (str == NULL)
			goto done;
	}
	if (str[0] == 'p' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_PF;
		str += 2;
		str = rte_eth_devargs_process_list(str, eth_da->ports,
				&eth_da->nb_ports, RTE_DIM(eth_da->ports));
		if (str == NULL || str[0] == '\0')
			goto done;
	} else if (eth_da->nb_mh_controllers > 0) {
		/* 'c' must be followed by 'pf'. */
		str = NULL;
		goto done;
	}
	if (str[0] == 'v' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_VF;
		str += 2;
	} else if (str[0] == 's' && str[1] == 'f') {
		eth_da->type = RTE_ETH_REPRESENTOR_SF;
		str += 2;
	} else {
		/* 'pf' must be followed by 'vf' or 'sf'. */
		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
			str = NULL;
			goto done;
		}
		eth_da->type = RTE_ETH_REPRESENTOR_VF;
	}
	str = rte_eth_devargs_process_list(str, eth_da->representor_ports,
		&eth_da->nb_representor_ports,
		RTE_DIM(eth_da->representor_ports));
done:
	if (str == NULL)
		RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str_org);
	return str == NULL ? -1 : 0;
}
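
/*
 * Illustrative example (not part of the original file): for the devargs
 * value "pf0vf[0-2]", this parser leaves eth_da->type ==
 * RTE_ETH_REPRESENTOR_VF, eth_da->ports == {0} with nb_ports == 1, and
 * eth_da->representor_ports == {0, 1, 2} with nb_representor_ports == 3.
 * A bare number such as "3" is taken as the legacy VF form.
 */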

struct dummy_queue {
	bool rx_warn_once;
	bool tx_warn_once;
};
static struct dummy_queue *dummy_queues_array[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
static struct dummy_queue per_port_queues[RTE_MAX_ETHPORTS];
RTE_INIT(dummy_queue_init)
{
	uint16_t port_id;

	for (port_id = 0; port_id < RTE_DIM(per_port_queues); port_id++) {
		unsigned int q;

		for (q = 0; q < RTE_DIM(dummy_queues_array[port_id]); q++)
			dummy_queues_array[port_id][q] = &per_port_queues[port_id];
	}
}

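/*
 * The dummy burst functions below back every queue of a port whose
 * fast-path ops have been reset. All RTE_MAX_QUEUES_PER_PORT queue slots
 * of a port point at the same per_port_queues[] entry, so the warning is
 * emitted at most once per port and direction rather than once per queue.
 */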
static uint16_t
dummy_eth_rx_burst(void *rxq,
		__rte_unused struct rte_mbuf **rx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	struct dummy_queue *queue = rxq;
	uintptr_t port_id;

	port_id = queue - per_port_queues;
	if (port_id < RTE_DIM(per_port_queues) && !queue->rx_warn_once) {
		RTE_ETHDEV_LOG(ERR, "lcore %u called rx_pkt_burst for not ready port %"PRIuPTR"\n",
			rte_lcore_id(), port_id);
		rte_dump_stack();
		queue->rx_warn_once = true;
	}
	rte_errno = ENOTSUP;
	return 0;
}

static uint16_t
dummy_eth_tx_burst(void *txq,
		__rte_unused struct rte_mbuf **tx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	struct dummy_queue *queue = txq;
	uintptr_t port_id;

	port_id = queue - per_port_queues;
	if (port_id < RTE_DIM(per_port_queues) && !queue->tx_warn_once) {
		RTE_ETHDEV_LOG(ERR, "lcore %u called tx_pkt_burst for not ready port %"PRIuPTR"\n",
			rte_lcore_id(), port_id);
		rte_dump_stack();
		queue->tx_warn_once = true;
	}
	rte_errno = ENOTSUP;
	return 0;
}

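/*
 * eth_dev_fp_ops_reset() points a port's fast-path ops at the dummy burst
 * functions and dummy queue arrays above, while eth_dev_fp_ops_setup()
 * wires them to the driver's real callbacks and queue arrays. While a port
 * is in the reset state, rte_eth_rx_burst() and rte_eth_tx_burst() on it
 * return 0 packets with rte_errno set to ENOTSUP, logging an error and a
 * stack dump once per port and direction.
 */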
void
eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
{
	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
	uintptr_t port_id = fpo - rte_eth_fp_ops;

	per_port_queues[port_id].rx_warn_once = false;
	per_port_queues[port_id].tx_warn_once = false;
	*fpo = (struct rte_eth_fp_ops) {
		.rx_pkt_burst = dummy_eth_rx_burst,
		.tx_pkt_burst = dummy_eth_tx_burst,
		.rxq = {
			.data = (void **)&dummy_queues_array[port_id],
			.clbk = dummy_data,
		},
		.txq = {
			.data = (void **)&dummy_queues_array[port_id],
			.clbk = dummy_data,
		},
	};
}

void
eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
		const struct rte_eth_dev *dev)
{
	fpo->rx_pkt_burst = dev->rx_pkt_burst;
	fpo->tx_pkt_burst = dev->tx_pkt_burst;
	fpo->tx_pkt_prepare = dev->tx_pkt_prepare;
	fpo->rx_queue_count = dev->rx_queue_count;
	fpo->rx_descriptor_status = dev->rx_descriptor_status;
	fpo->tx_descriptor_status = dev->tx_descriptor_status;
	fpo->recycle_tx_mbufs_reuse = dev->recycle_tx_mbufs_reuse;
	fpo->recycle_rx_descriptors_refill = dev->recycle_rx_descriptors_refill;

	fpo->rxq.data = dev->data->rx_queues;
	fpo->rxq.clbk = (void **)(uintptr_t)dev->post_rx_burst_cbs;

	fpo->txq.data = dev->data->tx_queues;
	fpo->txq.clbk = (void **)(uintptr_t)dev->pre_tx_burst_cbs;
}

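/*
 * The two helpers below walk the per-queue callback chains installed via
 * rte_eth_add_rx_callback()/rte_eth_add_tx_callback(). Illustrative sketch
 * (not part of the original file; the names are hypothetical) of an Rx
 * callback such a chain would invoke:
 *
 *	static uint16_t
 *	count_cb(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **pkts,
 *		 uint16_t nb_rx, uint16_t max_pkts, void *user_param)
 *	{
 *		*(uint64_t *)user_param += nb_rx;
 *		return nb_rx;
 *	}
 *
 *	uint64_t counter = 0;
 *	rte_eth_add_rx_callback(port_id, queue_id, count_cb, &counter);
 */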
uint16_t
rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
	void *opaque)
{
	const struct rte_eth_rxtx_callback *cb = opaque;

	while (cb != NULL) {
		nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
				nb_pkts, cb->param);
		cb = cb->next;
	}

	rte_eth_trace_call_rx_callbacks(port_id, queue_id, (void **)rx_pkts,
					nb_rx, nb_pkts);

	return nb_rx;
}

uint16_t
rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque)
{
	const struct rte_eth_rxtx_callback *cb = opaque;

	while (cb != NULL) {
		nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
				cb->param);
		cb = cb->next;
	}

	rte_eth_trace_call_tx_callbacks(port_id, queue_id, (void **)tx_pkts,
					nb_pkts);

	return nb_pkts;
}

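/*
 * The "rte_eth_dev_data" memzone is the rendezvous point between DPDK
 * processes: the primary process reserves and initializes it on first use,
 * while secondary processes only look it up by name, so every process maps
 * the same port data and ownership records.
 */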
void
eth_dev_shared_data_prepare(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	rte_spinlock_lock(&eth_dev_shared_data_lock);

	if (eth_dev_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate port data and ownership shared memory. */
			mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
					sizeof(*eth_dev_shared_data),
					rte_socket_id(), flags);
		} else
			mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
		if (mz == NULL)
			rte_panic("Cannot allocate ethdev shared data\n");

		eth_dev_shared_data = mz->addr;
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			eth_dev_shared_data->next_owner_id =
					RTE_ETH_DEV_NO_OWNER + 1;
			rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
			memset(eth_dev_shared_data->data, 0,
			       sizeof(eth_dev_shared_data->data));
		}
	}

	rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

void
eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **rxq = dev->data->rx_queues;

	if (rxq[qid] == NULL)
		return;

	if (dev->dev_ops->rx_queue_release != NULL)
		(*dev->dev_ops->rx_queue_release)(dev, qid);
	rxq[qid] = NULL;
}

void
eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void **txq = dev->data->tx_queues;

	if (txq[qid] == NULL)
		return;

	if (dev->dev_ops->tx_queue_release != NULL)
		(*dev->dev_ops->tx_queue_release)(dev, qid);
	txq[qid] = NULL;
}

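/*
 * eth_dev_rx_queue_config() and eth_dev_tx_queue_config() below allocate
 * the queue pointer array once, sized for RTE_MAX_QUEUES_PER_PORT, so a
 * later reconfiguration never reallocates it: growing the queue count just
 * uses more slots, shrinking releases the excess queues, and a count of
 * zero frees the array entirely.
 */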
int
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	unsigned int i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_rxq_release(dev, i);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	unsigned int i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) *
				RTE_MAX_QUEUES_PER_PORT,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		for (i = nb_queues; i < old_nb_queues; i++)
			eth_dev_txq_release(dev, i);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}