/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <ctype.h>
#include <stdalign.h>
#include <stdlib.h>
#include <pthread.h>

#include <rte_kvargs.h>
#include <rte_malloc.h>

#include "ethdev_driver.h"
#include "ethdev_private.h"
#include "rte_flow_driver.h"

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint16_t
eth_dev_find_free_port(void)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG_LINE(ERR, "Zero length Ethernet device name");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethernet device name is too long");
		return NULL;
	}

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethernet device with name %s already allocated",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Reached maximum number of Ethernet ports");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	eth_dev->flow_fp_ops = &rte_flow_fp_default_ops;
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	eth_dev_shared_data->allocated_ports++;

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ethdev = eth_dev_allocated(name);
	else
		ethdev = NULL;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return ethdev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device has the same port ID in both the
 * primary and the secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Device %s is not driven by the primary process",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	return eth_dev;
}

int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		/*
		 * Drop the lock while the callback runs; the 'active' flag
		 * keeps the entry from being unregistered in the meantime.
		 */
		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}

void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * For a secondary process, at this point the device is expected to be
	 * already 'usable', so shared data and all function pointers for
	 * fast-path devops must have been set up properly inside rte_eth_dev.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

	dev->state = RTE_ETH_DEV_ATTACHED;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	int ret;

	if (eth_dev == NULL)
		return -EINVAL;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	if (eth_dev_shared_data_prepare() == NULL)
		ret = -EINVAL;
	else
		ret = 0;
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	if (ret != 0)
		return ret;

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	eth_dev->flow_fp_ops = &rte_flow_fp_default_ops;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
		eth_dev->data = NULL;

		eth_dev_shared_data->allocated_ports--;
		eth_dev_shared_data_release();
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return 0;
}

int
rte_eth_dev_create(struct rte_device *device, const char *name,
	size_t priv_data_size,
	ethdev_bus_specific_init ethdev_bus_specific_init,
	void *bus_init_params,
	ethdev_init_t ethdev_init, void *init_params)
{
	struct rte_eth_dev *ethdev;
	int retval;

	if (*ethdev_init == NULL)
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ethdev = rte_eth_dev_allocate(name);
		if (!ethdev)
			return -ENODEV;

		if (priv_data_size) {
			ethdev->data->dev_private = rte_zmalloc_socket(
				name, priv_data_size, RTE_CACHE_LINE_SIZE,
				device->numa_node);

			if (!ethdev->data->dev_private) {
				RTE_ETHDEV_LOG_LINE(ERR,
					"failed to allocate private data");
				retval = -ENOMEM;
				goto probe_failed;
			}
		}
	} else {
		ethdev = rte_eth_dev_attach_secondary(name);
		if (!ethdev) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"secondary process attach failed, ethdev doesn't exist");
			return -ENODEV;
		}
	}

	ethdev->device = device;

	if (ethdev_bus_specific_init) {
		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
		if (retval) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"ethdev bus specific initialisation failed");
			goto probe_failed;
		}
	}

	retval = ethdev_init(ethdev, init_params);
	if (retval) {
		RTE_ETHDEV_LOG_LINE(ERR, "ethdev initialisation failed");
		goto probe_failed;
	}

	rte_eth_dev_probing_finish(ethdev);

	return retval;

probe_failed:
	rte_eth_dev_release_port(ethdev);
	return retval;
}

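/*
 * Illustrative sketch, not part of this file: a typical PMD probe function
 * wraps rte_eth_dev_create() as below. The names my_priv, my_ethdev_init and
 * my_pci_probe are hypothetical. Note that in a secondary process the private
 * data allocation is skipped and the init callback must cope with that.
 *
 *	struct my_priv {
 *		uint64_t flags;
 *	};
 *
 *	static int
 *	my_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
 *	{
 *		struct my_priv *priv = ethdev->data->dev_private;
 *
 *		RTE_SET_USED(init_params);
 *		priv->flags = 0;
 *		// Set ethdev->dev_ops, allocate data->mac_addrs, etc.
 *		return 0;
 *	}
 *
 *	static int
 *	my_pci_probe(struct rte_pci_driver *drv, struct rte_pci_device *pci_dev)
 *	{
 *		RTE_SET_USED(drv);
 *		return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 *					  sizeof(struct my_priv), NULL, NULL,
 *					  my_ethdev_init, NULL);
 *	}
 */
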
int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
	ethdev_uninit_t ethdev_uninit)
{
	int ret;

	ethdev = rte_eth_dev_allocated(ethdev->data->name);
	if (!ethdev)
		return -ENODEV;

	if (*ethdev_uninit == NULL)
		return -EINVAL;

	ret = ethdev_uninit(ethdev);
	if (ret)
		return ret;

	return rte_eth_dev_release_port(ethdev);
}

struct rte_eth_dev *
rte_eth_dev_get_by_name(const char *name)
{
	uint16_t pid;

	if (rte_eth_dev_get_port_by_name(name, &pid))
		return NULL;

	return &rte_eth_devices[pid];
}

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port %u must be stopped to allow reset",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fallthrough */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']') {
				/* For devargs with a single list, move back to
				 * state 2 once the letter becomes ']', so that
				 * each list can be treated as a distinct pair
				 * value. But for nested lists, e.g. the
				 * multiple representors case
				 * [pf[0-3],pfvf[3,4-6]], the complete nested
				 * list must be treated as one pair value;
				 * hence check whether the end of the outer
				 * list ']' has been reached, else stay in
				 * state 3.
				 */
				if ((strcmp("representor", pair->key) == 0) &&
				    (*(letter + 1) != '\0' && *(letter + 2) != '\0' &&
				     *(letter + 3) != '\0')			    &&
				    ((*(letter + 2) == 'p' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 'v' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 's' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 'c' && isdigit(*(letter + 3))) ||
				     (*(letter + 2) == '[' && isdigit(*(letter + 3))) ||
				     (isdigit(*(letter + 2)))))
					state = 3;
				else
					state = 2;
			} else if (*letter == '\0') {
				return -EINVAL;
			}
			break;
		}
		letter++;
	}
}

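/*
 * Example (illustrative): tokenising the devargs string
 * "representor=[pf[0-1],pfvf[2,4-6]],foo=bar" produces two key/value pairs.
 * State 3 above keeps the nested representor list intact as a single value:
 *
 *	pairs[0].key   = "representor"
 *	pairs[0].value = "[pf[0-1],pfvf[2,4-6]]"
 *	pairs[1].key   = "foo"
 *	pairs[1].value = "bar"
 */
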
static int
devargs_parse_representor_ports(struct rte_eth_devargs *eth_devargs,
				char *da_val, unsigned int da_idx,
				unsigned int nb_da)
{
	struct rte_eth_devargs *eth_da;
	int result = 0;

	if (da_idx + 1 > nb_da) {
		RTE_ETHDEV_LOG_LINE(ERR, "Devargs parsed %u > max array size %u",
			       da_idx + 1, nb_da);
		result = -1;
		goto parse_cleanup;
	}
	eth_da = &eth_devargs[da_idx];
	memset(eth_da, 0, sizeof(*eth_da));
	RTE_ETHDEV_LOG_LINE(DEBUG, "Devargs idx %u value %s", da_idx, da_val);
	result = rte_eth_devargs_parse_representor_ports(da_val, eth_da);

parse_cleanup:
	return result;
}

static int
eth_dev_tokenise_representor_list(char *p_val, struct rte_eth_devargs *eth_devargs,
				  unsigned int nb_da)
{
	char da_val[BUFSIZ], str[BUFSIZ];
	bool is_rep_portid_list = true;
	unsigned int devargs = 0;
	int result = 0, len = 0;
	int i = 0, j = 0;
	char *pos;

	pos = p_val;
	/* Length of consolidated list */
	while (*pos++ != '\0') {
		len++;
		if (isalpha(*pos))
			is_rep_portid_list = false;
	}

	/* A list of representor port IDs, e.g. [1,2,3], is treated as a
	 * single representor case.
	 */
	if (is_rep_portid_list) {
		result = devargs_parse_representor_ports(eth_devargs, p_val, 0, 1);
		if (result < 0)
			return result;

		devargs++;
		return devargs;
	}

	/* Initialise both buffers: da_val is tested before its first write. */
	memset(da_val, 0, BUFSIZ);
	memset(str, 0, BUFSIZ);
	/* Remove the exterior [] of the consolidated list */
	strncpy(str, &p_val[1], len - 2);
	while (1) {
		if (str[i] == '\0') {
			if (da_val[0] != '\0') {
				result = devargs_parse_representor_ports(eth_devargs, da_val,
									 devargs, nb_da);
				if (result < 0)
					goto parse_cleanup;

				devargs++;
			}
			break;
		}
		if (str[i] == ',' || str[i] == '[') {
			if (str[i] == ',') {
				if (da_val[0] != '\0') {
					da_val[j + 1] = '\0';
					result = devargs_parse_representor_ports(eth_devargs,
										 da_val, devargs,
										 nb_da);
					if (result < 0)
						goto parse_cleanup;

					devargs++;
					j = 0;
					memset(da_val, 0, BUFSIZ);
				}
			}

			if (str[i] == '[') {
				while (str[i] != ']' || isalpha(str[i + 1])) {
					da_val[j] = str[i];
					j++;
					i++;
				}
				da_val[j] = ']';
				da_val[j + 1] = '\0';
				result = devargs_parse_representor_ports(eth_devargs, da_val,
									 devargs, nb_da);
				if (result < 0)
					goto parse_cleanup;

				devargs++;
				j = 0;
				memset(da_val, 0, BUFSIZ);
			}
		} else {
			da_val[j] = str[i];
			j++;
		}
		i++;
	}
	result = devargs;

parse_cleanup:
	return result;
}

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_devargs,
		      unsigned int nb_da)
{
	struct rte_kvargs_pair *pair;
	struct rte_kvargs args;
	bool dup_rep = false;
	int devargs = 0;
	unsigned int i;
	int result = 0;

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (dup_rep) {
				RTE_ETHDEV_LOG_LINE(ERR, "Duplicated representor key: %s",
						    pair->value);
				result = -1;
				goto parse_cleanup;
			}

			RTE_ETHDEV_LOG_LINE(DEBUG, "Devarg pattern: %s", pair->value);
			if (pair->value[0] == '[') {
				/* Multiple representor list case */
				devargs = eth_dev_tokenise_representor_list(pair->value,
									    eth_devargs, nb_da);
				if (devargs < 0) {
					/* Propagate the error instead of the
					 * stale tokenise result.
					 */
					result = devargs;
					goto parse_cleanup;
				}
			} else {
				/* Single representor case */
				devargs = devargs_parse_representor_ports(eth_devargs, pair->value,
									  0, 1);
				if (devargs < 0) {
					result = devargs;
					goto parse_cleanup;
				}
				devargs++;
			}
			dup_rep = true;
		}
	}
	RTE_ETHDEV_LOG_LINE(DEBUG, "Total devargs parsed %d", devargs);
	result = devargs;

parse_cleanup:
	free(args.str);

	return result;
}

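/*
 * Illustrative usage sketch (hypothetical caller): a PMD parsing its
 * representor devargs into an array during probe.
 *
 *	struct rte_eth_devargs eth_da[RTE_MAX_ETHPORTS];
 *	int n;
 *
 *	n = rte_eth_devargs_parse("representor=[vf[0-3],sf[0,1]]",
 *				  eth_da, RTE_MAX_ETHPORTS);
 *	if (n < 0)
 *		return n;
 *	// n == 2 here: eth_da[0] describes vf[0-3], eth_da[1] describes sf[0,1].
 */
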
static inline int
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
		const char *ring_name)
{
	return snprintf(name, len, "eth_p%d_q%d_%s",
			port_id, queue_id, ring_name);
}

int
rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
		uint16_t queue_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc = 0;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		return -ENAMETOOLONG;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz)
		rc = rte_memzone_free(mz);
	else
		rc = -ENOENT;

	return rc;
}

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned int align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz) {
		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
				size > mz->len ||
				((uintptr_t)mz->addr & (align - 1)) != 0) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"memzone %s does not satisfy the requested attributes",
				mz->name);
			return NULL;
		}

		return mz;
	}

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}

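/*
 * Illustrative sketch (hypothetical driver code): reserving DMA-able memory
 * for a Rx descriptor ring in a queue setup handler, and freeing it again on
 * queue release. The names ring_size, queue_id and socket_id are assumed to
 * come from the caller.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id, ring_size,
 *				      RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 *	// Program mz->iova (and mz->addr for the CPU view) into the HW.
 *
 *	rte_eth_dma_zone_free(dev, "rx_ring", queue_id);
 */
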
int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_bind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_unbind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory, but peer info is. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	if (*dev->dev_ops->hairpin_queue_peer_update == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset)
{
	static const struct rte_mbuf_dynfield field_desc = {
		.name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME,
		.size = sizeof(rte_eth_ip_reassembly_dynfield_t),
		.align = alignof(rte_eth_ip_reassembly_dynfield_t),
	};
	static const struct rte_mbuf_dynflag ip_reassembly_dynflag = {
		.name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME,
	};
	int offset;

	offset = rte_mbuf_dynfield_register(&field_desc);
	if (offset < 0)
		return -1;
	if (field_offset != NULL)
		*field_offset = offset;

	offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag);
	if (offset < 0)
		return -1;
	if (flag_offset != NULL)
		*flag_offset = offset;

	return 0;
}

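/*
 * Illustrative sketch (hypothetical driver code): a PMD registering the IP
 * reassembly dynfield and dynflag once, then tagging a partially reassembled
 * packet on the Rx path.
 *
 *	static int ip_reass_field_offset = -1;
 *	static int ip_reass_flag_offset = -1;
 *
 *	if (rte_eth_ip_reassembly_dynfield_register(&ip_reass_field_offset,
 *						    &ip_reass_flag_offset) < 0)
 *		return -1;
 *
 *	rte_eth_ip_reassembly_dynfield_t *dynfield;
 *
 *	dynfield = RTE_MBUF_DYNFIELD(mbuf, ip_reass_field_offset,
 *				     rte_eth_ip_reassembly_dynfield_t *);
 *	dynfield->next_frag = next_frag_mbuf;
 *	dynfield->nb_frags = 2;
 *	mbuf->ol_flags |= RTE_BIT64(ip_reass_flag_offset);
 */
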
uint16_t
rte_eth_pkt_burst_dummy(void *queue __rte_unused,
		struct rte_mbuf **pkts __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_ETHDEV_LOG_LINE(WARNING, "Port %hu invalid representor ID range %u - %u, entry %u",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}

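/*
 * Example (illustrative): given a reported range of type
 * RTE_ETH_REPRESENTOR_VF with controller 0, pf 0, vf 4, id_base 10 and
 * id_end 13 (so count == 4), a lookup for pf 0 / VF 6 matches that range
 * and yields *repr_id = 10 + (6 - 4) = 12.
 */
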
int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	uint16_t i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (eth_dev_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			eth_dev_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (eth_dev_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}
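
/*
 * Illustrative usage sketch (hypothetical driver code): a PF driver
 * allocating one switch domain shared by the PF and all of its representors,
 * and releasing it on teardown. The domain ID is typically reported through
 * dev_info->switch_info.domain_id.
 *
 *	uint16_t domain_id;
 *	int ret;
 *
 *	ret = rte_eth_switch_domain_alloc(&domain_id);
 *	if (ret != 0)
 *		return ret;
 *	// ... use domain_id while the devices exist ...
 *	ret = rte_eth_switch_domain_free(domain_id);
 */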
948