xref: /dpdk/lib/ethdev/ethdev_driver.c (revision 5e46b176d37787c5536d48b23fff8baf5d674c88)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <ctype.h>
#include <stdalign.h>
#include <stdlib.h>
#include <pthread.h>

#include <rte_kvargs.h>
#include <rte_malloc.h>

#include "ethdev_driver.h"
#include "ethdev_private.h"
#include "rte_flow_driver.h"

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

/* Find an already-allocated ethdev by name, or NULL if none matches. */
static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

/* Scan the shared data for the first port slot with an empty name. */
static uint16_t
eth_dev_find_free_port(void)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

/* Bind the local ethdev entry to its slot in the shared data area. */
static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG_LINE(ERR, "Zero length Ethernet device name");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethernet device name is too long");
		return NULL;
	}

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethernet device with name %s already allocated",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Reached maximum number of Ethernet ports");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	eth_dev->flow_fp_ops = &rte_flow_fp_default_ops;
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	eth_dev_shared_data->allocated_ports++;

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return eth_dev;
}
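
/*
 * Usage sketch (illustrative, not part of this file): a PMD running in the
 * primary process typically pairs rte_eth_dev_allocate() with
 * rte_eth_dev_probing_finish() once its ops are wired up. The name
 * "net_example0" and the example_priv/example_dev_ops symbols are
 * hypothetical placeholders.
 *
 *	struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 *	if (eth_dev == NULL)
 *		return -ENODEV;
 *	eth_dev->data->dev_private = rte_zmalloc("example_priv",
 *			sizeof(struct example_priv), RTE_CACHE_LINE_SIZE);
 *	if (eth_dev->data->dev_private == NULL) {
 *		rte_eth_dev_release_port(eth_dev);
 *		return -ENOMEM;
 *	}
 *	eth_dev->dev_ops = &example_dev_ops;
 *	rte_eth_dev_probing_finish(eth_dev);
 */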

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ethdev = eth_dev_allocated(name);
	else
		ethdev = NULL;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return ethdev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device has the same port ID in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Device %s is not driven by the primary process",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	return eth_dev;
}

int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}
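
/*
 * Usage sketch (illustrative): the common PMD pattern of reporting a
 * link-state change from an interrupt handler to whatever callbacks the
 * application registered with rte_eth_dev_callback_register().
 *
 *	if (link_changed)
 *		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 */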

void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * For a secondary process, the device is expected to be 'usable' at
	 * this point, so the shared data and all function pointers for
	 * fast-path devops must already be set up properly inside rte_eth_dev.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

	dev->state = RTE_ETH_DEV_ATTACHED;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	int ret;

	if (eth_dev == NULL)
		return -EINVAL;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	if (eth_dev_shared_data_prepare() == NULL)
		ret = -EINVAL;
	else
		ret = 0;
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	if (ret != 0)
		return ret;

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	eth_dev->flow_fp_ops = &rte_flow_fp_default_ops;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
		eth_dev->data = NULL;

		eth_dev_shared_data->allocated_ports--;
		eth_dev_shared_data_release();
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return 0;
}

int
rte_eth_dev_create(struct rte_device *device, const char *name,
	size_t priv_data_size,
	ethdev_bus_specific_init ethdev_bus_specific_init,
	void *bus_init_params,
	ethdev_init_t ethdev_init, void *init_params)
{
	struct rte_eth_dev *ethdev;
	int retval;

	if (*ethdev_init == NULL)
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ethdev = rte_eth_dev_allocate(name);
		if (!ethdev)
			return -ENODEV;

		if (priv_data_size) {
			/* try to allocate private data on the device-local NUMA node */
			ethdev->data->dev_private = rte_zmalloc_socket(
				name, priv_data_size, RTE_CACHE_LINE_SIZE,
				device->numa_node);

			/* fall back to allocating on any socket on failure */
			if (ethdev->data->dev_private == NULL) {
				ethdev->data->dev_private = rte_zmalloc(name,
						priv_data_size, RTE_CACHE_LINE_SIZE);

				if (ethdev->data->dev_private == NULL) {
					RTE_ETHDEV_LOG_LINE(ERR, "failed to allocate private data");
					retval = -ENOMEM;
					goto probe_failed;
				}
				/* got memory, but not on the local node, so issue a warning */
				RTE_ETHDEV_LOG_LINE(WARNING,
						"Private data for ethdev '%s' not allocated on local NUMA node %d",
						device->name, device->numa_node);
			}
		}
	} else {
		ethdev = rte_eth_dev_attach_secondary(name);
		if (!ethdev) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"secondary process attach failed, ethdev doesn't exist");
			return -ENODEV;
		}
	}

	ethdev->device = device;

	if (ethdev_bus_specific_init) {
		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
		if (retval) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"ethdev bus specific initialisation failed");
			goto probe_failed;
		}
	}

	retval = ethdev_init(ethdev, init_params);
	if (retval) {
		RTE_ETHDEV_LOG_LINE(ERR, "ethdev initialisation failed");
		goto probe_failed;
	}

	rte_eth_dev_probing_finish(ethdev);

	return retval;

probe_failed:
	rte_eth_dev_release_port(ethdev);
	return retval;
}
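
/*
 * Usage sketch (illustrative): a PCI probe callback delegating port setup
 * to rte_eth_dev_create(), passing NULL for the bus-specific init hook and
 * its parameters. struct example_priv and example_dev_init() are
 * hypothetical placeholders.
 *
 *	static int
 *	example_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 *			struct rte_pci_device *pci_dev)
 *	{
 *		return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 *				sizeof(struct example_priv), NULL, NULL,
 *				example_dev_init, NULL);
 *	}
 */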

int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
	ethdev_uninit_t ethdev_uninit)
{
	int ret;

	ethdev = rte_eth_dev_allocated(ethdev->data->name);
	if (!ethdev)
		return -ENODEV;

	if (*ethdev_uninit == NULL)
		return -EINVAL;

	ret = ethdev_uninit(ethdev);
	if (ret)
		return ret;

	return rte_eth_dev_release_port(ethdev);
}
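
/*
 * Usage sketch (illustrative): the matching remove path, with a
 * hypothetical example_dev_uninit() callback releasing driver resources.
 *
 *	static int
 *	example_pci_remove(struct rte_pci_device *pci_dev)
 *	{
 *		struct rte_eth_dev *ethdev =
 *			rte_eth_dev_allocated(pci_dev->device.name);
 *
 *		if (ethdev == NULL)
 *			return 0;
 *		return rte_eth_dev_destroy(ethdev, example_dev_uninit);
 *	}
 */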

struct rte_eth_dev *
rte_eth_dev_get_by_name(const char *name)
{
	uint16_t pid;

	if (rte_eth_dev_get_port_by_name(name, &pid))
		return NULL;

	return &rte_eth_devices[pid];
}

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port %u must be stopped to allow reset",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

/* Split a devargs string into key/value pairs, keeping bracketed lists intact. */
static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fallthrough */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']') {
				/* For devargs with a single list, move to state 2 once the
				 * letter becomes ']', so that each entry can be treated as a
				 * separate key/value pair. But for nested lists, e.g. the
				 * multiple representors case [pf[0-3],pfvf[3,4-6]], the
				 * complete nested list must be treated as one pair value,
				 * hence check whether the end of the outer list ']' has been
				 * reached, else stay in state 3.
				 */
				if ((strcmp("representor", pair->key) == 0) &&
				    (*(letter + 1) != '\0' && *(letter + 2) != '\0' &&
				     *(letter + 3) != '\0') &&
				    ((*(letter + 2) == 'p' && *(letter + 3) == 'f') ||
				     (*(letter + 2) == 'v' && *(letter + 3) == 'f') ||
				     (*(letter + 2) == 's' && *(letter + 3) == 'f') ||
				     (*(letter + 2) == 'c' && isdigit(*(letter + 3))) ||
				     (*(letter + 2) == '[' && isdigit(*(letter + 3))) ||
				     isdigit(*(letter + 2))))
					state = 3;
				else
					state = 2;
			} else if (*letter == '\0') {
				return -EINVAL;
			}
			break;
		}
		letter++;
	}
}

static int
devargs_parse_representor_ports(struct rte_eth_devargs *eth_devargs, char
				*da_val, unsigned int da_idx, unsigned int nb_da)
{
	struct rte_eth_devargs *eth_da;
	int result = 0;

	if (da_idx + 1 > nb_da) {
		RTE_ETHDEV_LOG_LINE(ERR, "Devargs parsed %u > max array size %u",
			       da_idx + 1, nb_da);
		result = -1;
		goto parse_cleanup;
	}
	eth_da = &eth_devargs[da_idx];
	memset(eth_da, 0, sizeof(*eth_da));
	RTE_ETHDEV_LOG_LINE(DEBUG, "	  Devargs idx %u value %s", da_idx, da_val);
	result = rte_eth_devargs_parse_representor_ports(da_val, eth_da);

parse_cleanup:
	return result;
}

static int
eth_dev_tokenise_representor_list(char *p_val, struct rte_eth_devargs *eth_devargs,
				  unsigned int nb_da)
{
	char da_val[BUFSIZ], str[BUFSIZ];
	bool is_rep_portid_list = true;
	unsigned int devargs = 0;
	int result = 0, len = 0;
	int i = 0, j = 0;
	char *pos;

	pos = p_val;
	/* Length of consolidated list */
	while (*pos++ != '\0') {
		len++;
		if (isalpha(*pos))
			is_rep_portid_list = false;
	}

	/* A list of representor port IDs, e.g. [1,2,3], is treated as the
	 * single representor case.
	 */
	if (is_rep_portid_list) {
		result = devargs_parse_representor_ports(eth_devargs, p_val, 0, 1);
		if (result < 0)
			return result;

		devargs++;
		return devargs;
	}

	memset(str, 0, BUFSIZ);
	memset(da_val, 0, BUFSIZ);
	/* Remove the exterior [] of the consolidated list */
	strncpy(str, &p_val[1], len - 2);
	while (1) {
		if (str[i] == '\0') {
			if (da_val[0] != '\0') {
				result = devargs_parse_representor_ports(eth_devargs, da_val,
									 devargs, nb_da);
				if (result < 0)
					goto parse_cleanup;

				devargs++;
			}
			break;
		}
		if (str[i] == ',' || str[i] == '[') {
			if (str[i] == ',') {
				if (da_val[0] != '\0') {
					da_val[j + 1] = '\0';
					result = devargs_parse_representor_ports(eth_devargs,
										 da_val, devargs,
										 nb_da);
					if (result < 0)
						goto parse_cleanup;

					devargs++;
					j = 0;
					memset(da_val, 0, BUFSIZ);
				}
			}

			if (str[i] == '[') {
				while (str[i] != ']' || isalpha(str[i + 1])) {
					da_val[j] = str[i];
					j++;
					i++;
				}
				da_val[j] = ']';
				da_val[j + 1] = '\0';
				result = devargs_parse_representor_ports(eth_devargs, da_val,
									 devargs, nb_da);
				if (result < 0)
					goto parse_cleanup;

				devargs++;
				j = 0;
				memset(da_val, 0, BUFSIZ);
			}
		} else {
			da_val[j] = str[i];
			j++;
		}
		i++;
	}
	result = devargs;

parse_cleanup:
	return result;
}

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_devargs,
		      unsigned int nb_da)
{
	struct rte_kvargs_pair *pair;
	struct rte_kvargs args;
	bool dup_rep = false;
	int devargs = 0;
	unsigned int i;
	int result = 0;

	memset(eth_devargs, 0, nb_da * sizeof(*eth_devargs));

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (dup_rep) {
				RTE_ETHDEV_LOG_LINE(ERR, "Duplicated representor key: %s",
						    pair->value);
				result = -1;
				goto parse_cleanup;
			}

			RTE_ETHDEV_LOG_LINE(DEBUG, "Devarg pattern: %s", pair->value);
			if (pair->value[0] == '[') {
				/* Multiple representor list case */
				devargs = eth_dev_tokenise_representor_list(pair->value,
									    eth_devargs, nb_da);
				if (devargs < 0)
					goto parse_cleanup;
			} else {
				/* Single representor case */
				devargs = devargs_parse_representor_ports(eth_devargs, pair->value,
									  0, 1);
				if (devargs < 0)
					goto parse_cleanup;
				devargs++;
			}
			dup_rep = true;
		}
	}
	RTE_ETHDEV_LOG_LINE(DEBUG, "Total devargs parsed %d", devargs);
	result = devargs;

parse_cleanup:
	free(args.str);

	return result;
}
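
/*
 * Usage sketch (illustrative): parsing a representor pattern from a devargs
 * string. A nested list such as "representor=[pf[0-1],pf2vf[0-2]]" yields
 * one rte_eth_devargs entry per inner list, and the return value is the
 * number of entries filled in (negative on error).
 *
 *	struct rte_eth_devargs eth_da[4];
 *	int nb_rep;
 *
 *	nb_rep = rte_eth_devargs_parse("representor=[pf[0-1],pf2vf[0-2]]",
 *			eth_da, RTE_DIM(eth_da));
 *	if (nb_rep < 0)
 *		return nb_rep;
 */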

/* Build the memzone name for a given port/queue ring; returns the snprintf() result. */
static inline int
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
		const char *ring_name)
{
	return snprintf(name, len, "eth_p%d_q%d_%s",
			port_id, queue_id, ring_name);
}

int
rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
		uint16_t queue_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc = 0;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		return -ENAMETOOLONG;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz)
		rc = rte_memzone_free(mz);
	else
		rc = -ENOENT;

	return rc;
}

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned int align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz) {
		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
				size > mz->len ||
				((uintptr_t)mz->addr & (align - 1)) != 0) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"memzone %s does not satisfy the requested attributes",
				mz->name);
			return NULL;
		}

		return mz;
	}

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}
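
/*
 * Usage sketch (illustrative): a PMD reserving IOVA-contiguous memory for a
 * Tx descriptor ring during queue setup; ring_size, queue_idx and socket_id
 * are placeholders. The matching teardown call is
 * rte_eth_dma_zone_free(dev, "tx_ring", queue_idx).
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, ring_size,
 *			RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */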

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_bind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_unbind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	if (*dev->dev_ops->hairpin_queue_peer_update == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset)
{
	static const struct rte_mbuf_dynfield field_desc = {
		.name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME,
		.size = sizeof(rte_eth_ip_reassembly_dynfield_t),
		.align = alignof(rte_eth_ip_reassembly_dynfield_t),
	};
	static const struct rte_mbuf_dynflag ip_reassembly_dynflag = {
		.name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME,
	};
	int offset;

	offset = rte_mbuf_dynfield_register(&field_desc);
	if (offset < 0)
		return -1;
	if (field_offset != NULL)
		*field_offset = offset;

	offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag);
	if (offset < 0)
		return -1;
	if (flag_offset != NULL)
		*flag_offset = offset;

	return 0;
}
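
/*
 * Usage sketch (illustrative): a PMD enabling IP reassembly registers the
 * dynamic field and flag once, then caches the returned offsets for its
 * Rx path.
 *
 *	int field_off, flag_off;
 *
 *	if (rte_eth_ip_reassembly_dynfield_register(&field_off, &flag_off) < 0)
 *		return -1;
 */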

uint16_t
rte_eth_pkt_burst_dummy(void *queue __rte_unused,
		struct rte_mbuf **pkts __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_ETHDEV_LOG_LINE(WARNING, "Port %hu invalid representor ID range %u - %u, entry %u",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}
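
/*
 * Usage sketch (illustrative): translating "VF 2 on the caller's controller
 * and PF" (controller = -1, pf = -1) into the representor ID exposed by a
 * backing port; backer_port_id is a placeholder.
 *
 *	uint16_t repr_id;
 *
 *	if (rte_eth_representor_id_get(backer_port_id, RTE_ETH_REPRESENTOR_VF,
 *			-1, -1, 2, &repr_id) != 0)
 *		return -ENOENT;
 */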

int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	uint16_t i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (eth_dev_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			eth_dev_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (eth_dev_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}
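
/*
 * Usage sketch (illustrative): a PMD creating representor ports allocates
 * one switch domain shared by the PF and its representors, and frees it on
 * teardown.
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	...
 *	rte_eth_switch_domain_free(domain_id);
 */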

uint64_t
rte_eth_get_restore_flags(struct rte_eth_dev *dev, enum rte_eth_dev_operation op)
{
	if (dev->dev_ops->get_restore_flags != NULL)
		return dev->dev_ops->get_restore_flags(dev, op);
	else
		return RTE_ETH_RESTORE_ALL;
}
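
/*
 * Usage sketch (illustrative): the ethdev start path can query which items
 * a driver needs restored and skip the rest; RTE_ETH_START is assumed here
 * to be the relevant enum rte_eth_dev_operation value.
 *
 *	uint64_t flags = rte_eth_get_restore_flags(dev, RTE_ETH_START);
 *
 *	if (flags != 0)
 *		... restore the flagged configuration (MAC addresses,
 *		promiscuous/allmulticast state, ...) before start completes
 */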