/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <ctype.h>
#include <stdalign.h>
#include <stdlib.h>
#include <pthread.h>

#include <rte_kvargs.h>
#include <rte_malloc.h>

#include "ethdev_driver.h"
#include "ethdev_private.h"
#include "rte_flow_driver.h"

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

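/*
 * Look up a port by device name: a linear scan of rte_eth_devices[] matching
 * on the name stored in the shared data. Returns NULL if no port with that
 * name has been allocated. Callers serialize via the mcfg ethdev lock.
 */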
static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

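/*
 * Find the lowest unused port ID. An empty name in the shared data marks a
 * free slot; returns RTE_MAX_ETHPORTS when every slot is taken.
 */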
static uint16_t
eth_dev_find_free_port(void)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

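/*
 * Bind the per-process device entry for port_id to its slot in the
 * shared-memory data array and return it.
 */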
static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

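/*
 * Allocate a new ethdev port. Only the primary process may create ports
 * (note the RTE_ASSERT below): the name is validated, a free slot is
 * claimed under the mcfg ethdev lock and initialised with defaults such
 * as RTE_ETHER_MTU. Returns NULL on any failure.
 */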
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG_LINE(ERR, "Zero length Ethernet device name");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethernet device name is too long");
		return NULL;
	}

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethernet device with name %s already allocated",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Reached maximum number of Ethernet ports");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	eth_dev->flow_fp_ops = &rte_flow_fp_default_ops;
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	eth_dev_shared_data->allocated_ports++;

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ethdev = eth_dev_allocated(name);
	else
		ethdev = NULL;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return ethdev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device has the same port ID in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Device %s is not driven by the primary process",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	return eth_dev;
}

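/*
 * Deliver an event to all callbacks registered for it. The callback list
 * lock is dropped around each user callback so that the callback itself
 * may call back into ethdev; the 'active' flag lets
 * rte_eth_dev_callback_unregister() detect a callback in flight.
 */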
int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}

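/*
 * Finish device probing: notify listeners with RTE_ETH_EVENT_NEW and only
 * then mark the port ATTACHED, so the port never looks valid before the
 * NEW event has been delivered.
 */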
void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * In a secondary process, the device is expected to be already
	 * 'usable' at this point: shared data and all fast-path devops
	 * function pointers must have been set up properly inside
	 * rte_eth_dev.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

	dev->state = RTE_ETH_DEV_ATTACHED;
}

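/*
 * Release an ethdev port: fire the DESTROY event, reset fast-path ops to
 * their dummy defaults, and (in the primary process only) free the queue
 * arrays, MAC address tables and private data before clearing the shared
 * data slot.
 */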
int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	int ret;

	if (eth_dev == NULL)
		return -EINVAL;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	if (eth_dev_shared_data_prepare() == NULL)
		ret = -EINVAL;
	else
		ret = 0;
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	if (ret != 0)
		return ret;

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	eth_dev->flow_fp_ops = &rte_flow_fp_default_ops;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
		eth_dev->data = NULL;

		eth_dev_shared_data->allocated_ports--;
		eth_dev_shared_data_release();
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return 0;
}

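/*
 * Generic helper that drives the whole port creation sequence on behalf of
 * a PMD: allocate (primary) or attach (secondary), allocate private data,
 * run the optional bus-specific init, then the driver init, and finally
 * finish probing. On any failure the port is released again.
 *
 * Illustrative usage sketch (not part of this file; the "example_*" names
 * are hypothetical): a PMD probe callback would typically look like
 *
 *	static int
 *	example_probe(struct rte_device *dev)
 *	{
 *		return rte_eth_dev_create(dev, dev->name,
 *					  sizeof(struct example_priv),
 *					  NULL, NULL,
 *					  example_ethdev_init, NULL);
 *	}
 */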
int
rte_eth_dev_create(struct rte_device *device, const char *name,
	size_t priv_data_size,
	ethdev_bus_specific_init ethdev_bus_specific_init,
	void *bus_init_params,
	ethdev_init_t ethdev_init, void *init_params)
{
	struct rte_eth_dev *ethdev;
	int retval;

	if (*ethdev_init == NULL)
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ethdev = rte_eth_dev_allocate(name);
		if (!ethdev)
			return -ENODEV;

		if (priv_data_size) {
			ethdev->data->dev_private = rte_zmalloc_socket(
				name, priv_data_size, RTE_CACHE_LINE_SIZE,
				device->numa_node);

			if (!ethdev->data->dev_private) {
				RTE_ETHDEV_LOG_LINE(ERR,
					"failed to allocate private data");
				retval = -ENOMEM;
				goto probe_failed;
			}
		}
	} else {
		ethdev = rte_eth_dev_attach_secondary(name);
		if (!ethdev) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"secondary process attach failed, ethdev doesn't exist");
			return -ENODEV;
		}
	}

	ethdev->device = device;

	if (ethdev_bus_specific_init) {
		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
		if (retval) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"ethdev bus specific initialisation failed");
			goto probe_failed;
		}
	}

	retval = ethdev_init(ethdev, init_params);
	if (retval) {
		RTE_ETHDEV_LOG_LINE(ERR, "ethdev initialisation failed");
		goto probe_failed;
	}

	rte_eth_dev_probing_finish(ethdev);

	return retval;

probe_failed:
	rte_eth_dev_release_port(ethdev);
	return retval;
}

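/*
 * Counterpart of rte_eth_dev_create(): run the driver's uninit callback and
 * release the port. Typically called from a bus remove callback.
 */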
int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
	ethdev_uninit_t ethdev_uninit)
{
	int ret;

	ethdev = rte_eth_dev_allocated(ethdev->data->name);
	if (!ethdev)
		return -ENODEV;

	if (*ethdev_uninit == NULL)
		return -EINVAL;

	ret = ethdev_uninit(ethdev);
	if (ret)
		return ret;

	return rte_eth_dev_release_port(ethdev);
}

struct rte_eth_dev *
rte_eth_dev_get_by_name(const char *name)
{
	uint16_t pid;

	if (rte_eth_dev_get_port_by_name(name, &pid))
		return NULL;

	return &rte_eth_devices[pid];
}

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port %u must be stopped to allow reset",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

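/*
 * Split a devargs string such as "representor=[pf[0-1],vf[2]],other=1" into
 * key/value pairs in place. A small state machine walks the string: state 0
 * expects a key, state 1 scans the key up to '=', state 2 scans a plain
 * value up to ',' or end of string, and state 3 consumes a bracketed list
 * as a single value.
 */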
static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fallthrough */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']') {
				/* For devargs with a single list, move to state 2 once
				 * the letter becomes ']', so that each list can be
				 * treated as a separate key/value pair. But for nested
				 * lists, e.g. the multiple representors case
				 * [pf[0-3],pfvf[3,4-6]], the complete nested list must
				 * be treated as one pair value; hence stay in state 3
				 * until the closing ']' of the outer list is reached.
				 */
				if ((strcmp("representor", pair->key) == 0) &&
				    (*(letter + 1) != '\0' && *(letter + 2) != '\0' &&
				     *(letter + 3) != '\0')			    &&
				    ((*(letter + 2) == 'p' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 'v' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 's' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 'c' && isdigit(*(letter + 3))) ||
				     (*(letter + 2) == '[' && isdigit(*(letter + 3))) ||
				     (isdigit(*(letter + 2)))))
					state = 3;
				else
					state = 2;
			} else if (*letter == '\0') {
				return -EINVAL;
			}
			break;
		}
		letter++;
	}
}

static int
devargs_parse_representor_ports(struct rte_eth_devargs *eth_devargs, char
				*da_val, unsigned int da_idx, unsigned int nb_da)
{
	struct rte_eth_devargs *eth_da;
	int result = 0;

	if (da_idx + 1 > nb_da) {
		RTE_ETHDEV_LOG_LINE(ERR, "Devargs parsed %d > max array size %d",
			       da_idx + 1, nb_da);
		result = -1;
		goto parse_cleanup;
	}
	eth_da = &eth_devargs[da_idx];
	memset(eth_da, 0, sizeof(*eth_da));
	RTE_ETHDEV_LOG_LINE(DEBUG, "	  Devargs idx %d value %s", da_idx, da_val);
	result = rte_eth_devargs_parse_representor_ports(da_val, eth_da);

parse_cleanup:
	return result;
}

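/*
 * Expand a consolidated representor list such as [pf[0-3],pfvf[3,4-6]] into
 * one rte_eth_devargs entry per element. A plain port ID list like [1,2,3]
 * contains no letters and is parsed as a single entry instead.
 */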
static int
eth_dev_tokenise_representor_list(char *p_val, struct rte_eth_devargs *eth_devargs,
				  unsigned int nb_da)
{
	char da_val[BUFSIZ], str[BUFSIZ];
	bool is_rep_portid_list = true;
	unsigned int devargs = 0;
	int result = 0, len = 0;
	int i = 0, j = 0;
	char *pos;

	pos = p_val;
	/* Length of consolidated list */
	while (*pos++ != '\0') {
		len++;
		if (isalpha(*pos))
			is_rep_portid_list = false;
	}

	/* A list of representor port IDs, e.g. [1,2,3], is treated as a
	 * single representor case.
	 */
	if (is_rep_portid_list) {
		result = devargs_parse_representor_ports(eth_devargs, p_val, 0, 1);
		if (result < 0)
			return result;

		devargs++;
		return devargs;
	}

	memset(str, 0, BUFSIZ);
	memset(da_val, 0, BUFSIZ);
	/* Remove the exterior [] of the consolidated list */
	strncpy(str, &p_val[1], len - 2);
	while (1) {
		if (str[i] == '\0') {
			if (da_val[0] != '\0') {
				result = devargs_parse_representor_ports(eth_devargs, da_val,
									 devargs, nb_da);
				if (result < 0)
					goto parse_cleanup;

				devargs++;
			}
			break;
		}
		if (str[i] == ',' || str[i] == '[') {
			if (str[i] == ',') {
				if (da_val[0] != '\0') {
					da_val[j + 1] = '\0';
					result = devargs_parse_representor_ports(eth_devargs,
										 da_val, devargs,
										 nb_da);
					if (result < 0)
						goto parse_cleanup;

					devargs++;
					j = 0;
					memset(da_val, 0, BUFSIZ);
				}
			}

			if (str[i] == '[') {
				while (str[i] != ']' || isalpha(str[i + 1])) {
					da_val[j] = str[i];
					j++;
					i++;
				}
				da_val[j] = ']';
				da_val[j + 1] = '\0';
				result = devargs_parse_representor_ports(eth_devargs, da_val,
									 devargs, nb_da);
				if (result < 0)
					goto parse_cleanup;

				devargs++;
				j = 0;
				memset(da_val, 0, BUFSIZ);
			}
		} else {
			da_val[j] = str[i];
			j++;
		}
		i++;
	}
	result = devargs;

parse_cleanup:
	return result;
}

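/*
 * Parse the representor part of a device argument string into an array of
 * rte_eth_devargs. Returns the number of entries filled in, or a negative
 * value on error.
 *
 * Illustrative usage sketch (hypothetical caller code):
 *
 *	struct rte_eth_devargs da[RTE_MAX_ETHPORTS];
 *	int n = rte_eth_devargs_parse("representor=[pf[0-1],vf[2-3]]",
 *				      da, RTE_DIM(da));
 *	if (n < 0)
 *		return n; // parse error
 *	// da[0..n-1] now each describe one representor pattern
 */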
int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_devargs,
		      unsigned int nb_da)
{
	struct rte_kvargs_pair *pair;
	struct rte_kvargs args;
	bool dup_rep = false;
	int devargs = 0;
	unsigned int i;
	int result = 0;

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (dup_rep) {
				RTE_ETHDEV_LOG_LINE(ERR, "Duplicated representor key: %s",
						    pair->value);
				result = -1;
				goto parse_cleanup;
			}

			RTE_ETHDEV_LOG_LINE(DEBUG, "Devarg pattern: %s", pair->value);
			if (pair->value[0] == '[') {
				/* Multiple representor list case */
				devargs = eth_dev_tokenise_representor_list(pair->value,
									    eth_devargs, nb_da);
				if (devargs < 0) {
					result = devargs;
					goto parse_cleanup;
				}
			} else {
				/* Single representor case */
				devargs = devargs_parse_representor_ports(eth_devargs, pair->value,
									  0, 1);
				if (devargs < 0) {
					result = devargs;
					goto parse_cleanup;
				}
				devargs++;
			}
			dup_rep = true;
		}
	}
	RTE_ETHDEV_LOG_LINE(DEBUG, "Total devargs parsed %d", devargs);
	result = devargs;

parse_cleanup:
	free(args.str);

	return result;
}

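/*
 * Build the memzone name for a DMA ring: "eth_p<port>_q<queue>_<ring_name>",
 * e.g. "eth_p0_q3_tx_ring". Returns the would-be length, like snprintf(),
 * so callers can detect truncation.
 */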
static inline int
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
		const char *ring_name)
{
	return snprintf(name, len, "eth_p%d_q%d_%s",
			port_id, queue_id, ring_name);
}

int
rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
		uint16_t queue_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc = 0;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		return -ENAMETOOLONG;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz)
		rc = rte_memzone_free(mz);
	else
		rc = -ENOENT;

	return rc;
}

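/*
 * Reserve (or reuse) an IOVA-contiguous memzone for a queue's DMA ring.
 * If a memzone with the same name already exists, e.g. after a device
 * restart, it is returned as-is provided it still satisfies the requested
 * socket, size and alignment; otherwise NULL is returned.
 */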
const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned int align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz) {
		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
				size > mz->len ||
				((uintptr_t)mz->addr & (align - 1)) != 0) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"memzone %s does not satisfy the requested attributes",
				mz->name);
			return NULL;
		}

		return mz;
	}

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}

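/*
 * Hairpin peer bind/unbind/update helpers: thin wrappers that forward the
 * request to the driver's dev_ops on the other end of a hairpin binding.
 * Port and queue validity is checked by the public API before these are
 * reached, hence no re-validation here.
 */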
int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_bind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_unbind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	if (*dev->dev_ops->hairpin_queue_peer_update == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

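/*
 * Register the mbuf dynamic field and flag used to report IP reassembly
 * results. Registration is idempotent at the mbuf layer, so several drivers
 * may call this; both offsets are optional outputs.
 *
 * Illustrative usage sketch (hypothetical variable names): a driver that
 * stored the offsets can read the result from a received mbuf like
 *
 *	if (mbuf->ol_flags & RTE_BIT64(flag_offset)) {
 *		rte_eth_ip_reassembly_dynfield_t *dynf =
 *			RTE_MBUF_DYNFIELD(mbuf, field_offset,
 *					  rte_eth_ip_reassembly_dynfield_t *);
 *		// reassembly incomplete, dynf lists the fragments
 *	}
 */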
int
rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset)
{
	static const struct rte_mbuf_dynfield field_desc = {
		.name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME,
		.size = sizeof(rte_eth_ip_reassembly_dynfield_t),
		.align = alignof(rte_eth_ip_reassembly_dynfield_t),
	};
	static const struct rte_mbuf_dynflag ip_reassembly_dynflag = {
		.name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME,
	};
	int offset;

	offset = rte_mbuf_dynfield_register(&field_desc);
	if (offset < 0)
		return -1;
	if (field_offset != NULL)
		*field_offset = offset;

	offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag);
	if (offset < 0)
		return -1;
	if (flag_offset != NULL)
		*flag_offset = offset;

	return 0;
}

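/*
 * Placeholder Rx/Tx burst function that never transfers any packet;
 * fast-path ops point here while a port is not fully set up.
 */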
uint16_t
rte_eth_pkt_burst_dummy(void *queue __rte_unused,
		struct rte_mbuf **pkts __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

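/*
 * Map a (controller, pf, representor_port) tuple of the given type to the
 * representor ID exposed by the PMD, using the representor ranges reported
 * via rte_eth_representor_info_get(). Falls back to a direct mapping for
 * legacy VF representors on PMDs that do not implement the info query.
 */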
int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_ETHDEV_LOG_LINE(WARNING, "Port %hu invalid representor ID Range %u - %u, entry %d",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}

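/*
 * Allocate a free switch domain ID from the eth_dev_switch_domains[] pool.
 * Pair each successful call with rte_eth_switch_domain_free().
 *
 * Illustrative usage sketch (hypothetical driver code):
 *
 *	uint16_t dom_id;
 *
 *	if (rte_eth_switch_domain_alloc(&dom_id) != 0)
 *		return -ENOSPC; // all RTE_MAX_ETHPORTS domains in use
 *	// ... share dom_id between the ports of one switch ...
 *	rte_eth_switch_domain_free(dom_id);
 */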
int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	uint16_t i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (eth_dev_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			eth_dev_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (eth_dev_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}
949