/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <ctype.h>
#include <stdalign.h>
#include <stdlib.h>
#include <pthread.h>

#include <rte_kvargs.h>
#include <rte_malloc.h>

#include "ethdev_driver.h"
#include "ethdev_private.h"

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint16_t
eth_dev_find_free_port(void)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG_LINE(ERR, "Zero length Ethernet device name");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethernet device name is too long");
		return NULL;
	}

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethernet device with name %s already allocated",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Reached maximum number of Ethernet ports");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	eth_dev_shared_data->allocated_ports++;

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return eth_dev;
}

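/*
 * A minimal usage sketch (illustrative only, not part of this file): how a
 * hypothetical PMD probe path might use rte_eth_dev_allocate(). The name
 * "net_example0" and struct example_priv are assumptions.
 *
 *	struct rte_eth_dev *dev = rte_eth_dev_allocate("net_example0");
 *	if (dev == NULL)
 *		return -ENODEV;	// no free port, or name already in use
 *	dev->data->dev_private = rte_zmalloc("example_priv",
 *			sizeof(struct example_priv), RTE_CACHE_LINE_SIZE);
 *	// ... set dev->dev_ops, allocate data->mac_addrs, then call
 *	// rte_eth_dev_probing_finish(dev)
 */
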
struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ethdev = eth_dev_allocated(name);
	else
		ethdev = NULL;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return ethdev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device has the same port ID in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Device %s is not driven by the primary process",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	return eth_dev;
}

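/*
 * Sketch of the secondary-process counterpart (illustrative; handler names
 * are assumptions): a PMD probed in a secondary process looks the port up
 * by name instead of allocating it, then installs its per-process
 * fast-path functions before probing finishes.
 *
 *	struct rte_eth_dev *dev = rte_eth_dev_attach_secondary("net_example0");
 *	if (dev == NULL)
 *		return -ENODEV;	// primary has not created this port
 *	dev->rx_pkt_burst = example_rx_burst;
 *	dev->tx_pkt_burst = example_tx_burst;
 *	rte_eth_dev_probing_finish(dev);
 */
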
int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}

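/*
 * The list lock is deliberately dropped around the user callback above, so
 * a callback may itself call into ethdev; the 'active' flag lets the
 * unregister path detect an in-flight callback. Application-side
 * registration sketch (the callback name is an assumption):
 *
 *	static int
 *	on_event(uint16_t port_id, enum rte_eth_event_type event,
 *		 void *cb_arg, void *ret_param)
 *	{
 *		// react to RTE_ETH_EVENT_NEW, _DESTROY, link changes, ...
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      on_event, NULL);
 */
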
void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * For a secondary process, at this point we expect the device to
	 * already be 'usable': shared data and all function pointers for
	 * fast-path dev ops must have been set up properly inside rte_eth_dev.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

	dev->state = RTE_ETH_DEV_ATTACHED;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	int ret;

	if (eth_dev == NULL)
		return -EINVAL;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	if (eth_dev_shared_data_prepare() == NULL)
		ret = -EINVAL;
	else
		ret = 0;
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	if (ret != 0)
		return ret;

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
		eth_dev->data = NULL;

		eth_dev_shared_data->allocated_ports--;
		eth_dev_shared_data_release();
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return 0;
}

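/*
 * Note the teardown order above: DESTROY callbacks run while port data is
 * still valid, fast-path ops are reset before the queue and MAC arrays are
 * freed, and the shared data slot is cleared only in the primary process,
 * mirroring rte_eth_dev_allocate().
 */
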
int
rte_eth_dev_create(struct rte_device *device, const char *name,
	size_t priv_data_size,
	ethdev_bus_specific_init ethdev_bus_specific_init,
	void *bus_init_params,
	ethdev_init_t ethdev_init, void *init_params)
{
	struct rte_eth_dev *ethdev;
	int retval;

	if (*ethdev_init == NULL)
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ethdev = rte_eth_dev_allocate(name);
		if (!ethdev)
			return -ENODEV;

		if (priv_data_size) {
			ethdev->data->dev_private = rte_zmalloc_socket(
				name, priv_data_size, RTE_CACHE_LINE_SIZE,
				device->numa_node);

			if (!ethdev->data->dev_private) {
				RTE_ETHDEV_LOG_LINE(ERR,
					"failed to allocate private data");
				retval = -ENOMEM;
				goto probe_failed;
			}
		}
	} else {
		ethdev = rte_eth_dev_attach_secondary(name);
		if (!ethdev) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"secondary process attach failed, ethdev doesn't exist");
			return -ENODEV;
		}
	}

	ethdev->device = device;

	if (ethdev_bus_specific_init) {
		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
		if (retval) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"ethdev bus specific initialisation failed");
			goto probe_failed;
		}
	}

	retval = ethdev_init(ethdev, init_params);
	if (retval) {
		RTE_ETHDEV_LOG_LINE(ERR, "ethdev initialisation failed");
		goto probe_failed;
	}

	rte_eth_dev_probing_finish(ethdev);

	return retval;

probe_failed:
	rte_eth_dev_release_port(ethdev);
	return retval;
}

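/*
 * A hedged caller sketch (illustrative; the bus type, names and callbacks
 * are assumptions, not part of this file): a PCI probe handler can delegate
 * both the primary and secondary process paths to rte_eth_dev_create().
 *
 *	static int
 *	example_eth_init(struct rte_eth_dev *dev, void *init_params)
 *	{
 *		dev->dev_ops = &example_dev_ops;
 *		return 0;
 *	}
 *
 *	static int
 *	example_pci_probe(struct rte_pci_driver *drv, struct rte_pci_device *pci)
 *	{
 *		return rte_eth_dev_create(&pci->device, pci->device.name,
 *				sizeof(struct example_priv), NULL, NULL,
 *				example_eth_init, NULL);
 *	}
 */
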
int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
	ethdev_uninit_t ethdev_uninit)
{
	int ret;

	ethdev = rte_eth_dev_allocated(ethdev->data->name);
	if (!ethdev)
		return -ENODEV;

	if (*ethdev_uninit == NULL)
		return -EINVAL;

	ret = ethdev_uninit(ethdev);
	if (ret)
		return ret;

	return rte_eth_dev_release_port(ethdev);
}

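/*
 * The matching remove path is symmetric (sketch; example_eth_uninit is an
 * assumed driver callback):
 *
 *	static int
 *	example_pci_remove(struct rte_pci_device *pci)
 *	{
 *		struct rte_eth_dev *dev =
 *			rte_eth_dev_allocated(pci->device.name);
 *
 *		if (dev == NULL)
 *			return 0;	// nothing to clean up
 *		return rte_eth_dev_destroy(dev, example_eth_uninit);
 *	}
 */
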
struct rte_eth_dev *
rte_eth_dev_get_by_name(const char *name)
{
	uint16_t pid;

	if (rte_eth_dev_get_port_by_name(name, &pid))
		return NULL;

	return &rte_eth_devices[pid];
}

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port %u must be stopped to allow reset",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fallthrough */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']') {
				/* For devargs with a single list, move to state 2 once
				 * the letter becomes ']' so that each list can be treated
				 * as a separate key/value pair. But for nested lists,
				 * e.g. the multiple-representor case [pf[0-3],pfvf[3,4-6]],
				 * the complete nested list should be treated as one pair
				 * value, so check whether the end of the outer list ']'
				 * has been reached; otherwise stay in state 3.
				 */
				if ((strcmp("representor", pair->key) == 0) &&
				    (*(letter + 1) != '\0' && *(letter + 2) != '\0' &&
				     *(letter + 3) != '\0')			    &&
				    ((*(letter + 2) == 'p' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 'v' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 's' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 'c' && isdigit(*(letter + 3))) ||
				     (*(letter + 2) == '[' && isdigit(*(letter + 3))) ||
				     (isdigit(*(letter + 2)))))
					state = 3;
				else
					state = 2;
			} else if (*letter == '\0') {
				return -EINVAL;
			}
			break;
		}
		letter++;
	}
}

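/*
 * Tokeniser example (illustrative): the input
 * "representor=[pf[0-1],vf[2-3]],foo=bar" yields the two pairs
 * ("representor", "[pf[0-1],vf[2-3]]") and ("foo", "bar"); state 3 keeps
 * the bracketed representor list intact as a single value instead of
 * splitting it at the embedded commas.
 */
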
static int
devargs_parse_representor_ports(struct rte_eth_devargs *eth_devargs,
				char *da_val, unsigned int da_idx, unsigned int nb_da)
{
	struct rte_eth_devargs *eth_da;
	int result = 0;

	if (da_idx + 1 > nb_da) {
		RTE_ETHDEV_LOG_LINE(ERR, "Devargs parsed %u > max array size %u",
			       da_idx + 1, nb_da);
		result = -1;
		goto parse_cleanup;
	}
	eth_da = &eth_devargs[da_idx];
	memset(eth_da, 0, sizeof(*eth_da));
	RTE_ETHDEV_LOG_LINE(DEBUG, "	  Devargs idx %u value %s", da_idx, da_val);
	result = rte_eth_devargs_parse_representor_ports(da_val, eth_da);

parse_cleanup:
	return result;
}

static int
eth_dev_tokenise_representor_list(char *p_val, struct rte_eth_devargs *eth_devargs,
				  unsigned int nb_da)
{
	char da_val[BUFSIZ], str[BUFSIZ];
	bool is_rep_portid_list = true;
	unsigned int devargs = 0;
	int result = 0, len = 0;
	int i = 0, j = 0;
	char *pos;

	pos = p_val;
	/* Length of consolidated list */
	while (*pos++ != '\0') {
		len++;
		if (isalpha(*pos))
			is_rep_portid_list = false;
	}

	/* A list of representor port IDs, i.e. [1,2,3], should be treated as
	 * the single-representor case.
	 */
	if (is_rep_portid_list) {
		result = devargs_parse_representor_ports(eth_devargs, p_val, 0, 1);
		if (result < 0)
			return result;

		devargs++;
		return devargs;
	}

	/* da_val must start empty: it is tested before first use below. */
	memset(da_val, 0, BUFSIZ);
	memset(str, 0, BUFSIZ);
	/* Remove the exterior [] of the consolidated list */
	strncpy(str, &p_val[1], len - 2);
	while (1) {
		if (str[i] == '\0') {
			if (da_val[0] != '\0') {
				result = devargs_parse_representor_ports(eth_devargs, da_val,
									 devargs, nb_da);
				if (result < 0)
					goto parse_cleanup;

				devargs++;
			}
			break;
		}
		if (str[i] == ',' || str[i] == '[') {
			if (str[i] == ',') {
				if (da_val[0] != '\0') {
					da_val[j + 1] = '\0';
					result = devargs_parse_representor_ports(eth_devargs,
										 da_val, devargs,
										 nb_da);
					if (result < 0)
						goto parse_cleanup;

					devargs++;
					j = 0;
					memset(da_val, 0, BUFSIZ);
				}
			}

			if (str[i] == '[') {
				while (str[i] != ']' || isalpha(str[i + 1])) {
					da_val[j] = str[i];
					j++;
					i++;
				}
				da_val[j] = ']';
				da_val[j + 1] = '\0';
				result = devargs_parse_representor_ports(eth_devargs, da_val,
									 devargs, nb_da);
				if (result < 0)
					goto parse_cleanup;

				devargs++;
				j = 0;
				memset(da_val, 0, BUFSIZ);
			}
		} else {
			da_val[j] = str[i];
			j++;
		}
		i++;
	}
	result = devargs;

parse_cleanup:
	return result;
}

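/*
 * Example (illustrative): for the consolidated value "[pf[0-3],pfvf[3,4-6]]"
 * the loop above strips the outer brackets and emits "pf[0-3]" and
 * "pfvf[3,4-6]" as separate representor devargs, while a plain port ID list
 * such as "[1,2,3]" is handled up front as a single devarg.
 */
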
int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_devargs,
		      unsigned int nb_da)
{
	struct rte_kvargs_pair *pair;
	struct rte_kvargs args;
	bool dup_rep = false;
	int devargs = 0;
	unsigned int i;
	int result = 0;

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (dup_rep) {
				RTE_ETHDEV_LOG_LINE(ERR, "Duplicated representor key: %s",
						    pair->value);
				result = -1;
				goto parse_cleanup;
			}

			RTE_ETHDEV_LOG_LINE(DEBUG, "Devarg pattern: %s", pair->value);
			if (pair->value[0] == '[') {
				/* Multiple representor list case */
				devargs = eth_dev_tokenise_representor_list(pair->value,
									    eth_devargs, nb_da);
				if (devargs < 0) {
					result = devargs;
					goto parse_cleanup;
				}
			} else {
				/* Single representor case */
				devargs = devargs_parse_representor_ports(eth_devargs, pair->value,
									  0, 1);
				if (devargs < 0) {
					result = devargs;
					goto parse_cleanup;
				}
				devargs++;
			}
			dup_rep = true;
		}
	}
	RTE_ETHDEV_LOG_LINE(DEBUG, "Total devargs parsed %d", devargs);
	result = devargs;

parse_cleanup:
	free(args.str);

	return result;
}

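/*
 * A minimal caller sketch (illustrative): parsing a devargs string into a
 * small fixed array, as a PMD probe path might do.
 *
 *	struct rte_eth_devargs eth_da[4];
 *	int n = rte_eth_devargs_parse("representor=[pf[0-1],vf[2-3]]",
 *				      eth_da, RTE_DIM(eth_da));
 *	if (n < 0)
 *		return n;	// malformed devargs
 *	// eth_da[0..n-1] now describe the requested representors
 */
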
static inline int
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
		const char *ring_name)
{
	return snprintf(name, len, "eth_p%d_q%d_%s",
			port_id, queue_id, ring_name);
}

int
rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
		uint16_t queue_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc = 0;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		return -ENAMETOOLONG;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz)
		rc = rte_memzone_free(mz);
	else
		rc = -ENOENT;

	return rc;
}

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned int align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz) {
		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
				size > mz->len ||
				((uintptr_t)mz->addr & (align - 1)) != 0) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"memzone %s does not satisfy the requested attributes",
				mz->name);
			return NULL;
		}

		return mz;
	}

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}

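/*
 * A hedged usage sketch (ring name and alignment are assumptions): a PMD Rx
 * queue setup reserving IOVA-contiguous descriptor memory. On a re-probe
 * the lookup above returns the existing zone, provided size, alignment and
 * socket still fit.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 *			ring_size, EXAMPLE_RING_ALIGN, socket_id);
 *	if (mz == NULL)
 *		return -rte_errno;
 *	rxq->descs = mz->addr;
 *	rxq->descs_iova = mz->iova;
 */
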
int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_bind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_unbind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	if (*dev->dev_ops->hairpin_queue_peer_update == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset)
{
	static const struct rte_mbuf_dynfield field_desc = {
		.name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME,
		.size = sizeof(rte_eth_ip_reassembly_dynfield_t),
		.align = alignof(rte_eth_ip_reassembly_dynfield_t),
	};
	static const struct rte_mbuf_dynflag ip_reassembly_dynflag = {
		.name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME,
	};
	int offset;

	offset = rte_mbuf_dynfield_register(&field_desc);
	if (offset < 0)
		return -1;
	if (field_offset != NULL)
		*field_offset = offset;

	offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag);
	if (offset < 0)
		return -1;
	if (flag_offset != NULL)
		*flag_offset = offset;

	return 0;
}

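/*
 * A hedged sketch of consuming the registered offsets (illustrative; the
 * dynfield layout is defined in ethdev_driver.h and the flag is a bit
 * number tested against ol_flags):
 *
 *	int field_off, flag_off;
 *
 *	if (rte_eth_ip_reassembly_dynfield_register(&field_off, &flag_off) < 0)
 *		return -1;
 *	if (mbuf->ol_flags & RTE_BIT64(flag_off)) {
 *		// reassembly incomplete: fragment chain lives in the dynfield
 *		rte_eth_ip_reassembly_dynfield_t *dynf =
 *			RTE_MBUF_DYNFIELD(mbuf, field_off,
 *					  rte_eth_ip_reassembly_dynfield_t *);
 *	}
 */
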
uint16_t
rte_eth_pkt_burst_dummy(void *queue __rte_unused,
		struct rte_mbuf **pkts __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_ETHDEV_LOG_LINE(WARNING, "Port %hu invalid representor ID range %u - %u, entry %u",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}

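/*
 * Worked example of the range lookup above (illustrative): with a VF range
 * { type = VF, pf = 0, vf = 0, id_base = 5, id_end = 8 } on the matching
 * controller, count is 4, so representor_port 2 is inside [0, 4) and
 * *repr_id = 5 + (2 - 0) = 7.
 */
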
int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	uint16_t i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (eth_dev_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			eth_dev_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

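/*
 * Allocation and release pair up (sketch, illustrative): a PF driver
 * typically allocates one domain that all of its representor ports report
 * in dev_info.switch_info.domain_id, and frees it on close.
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;	// all RTE_MAX_ETHPORTS domains in use
 *	// ... ports created, used, then torn down ...
 *	rte_eth_switch_domain_free(domain_id);
 */
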
int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (eth_dev_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}
944