/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <ctype.h>
#include <stdalign.h>
#include <stdlib.h>
#include <pthread.h>

#include <rte_kvargs.h>
#include <rte_malloc.h>

#include "ethdev_driver.h"
#include "ethdev_private.h"
#include "rte_flow_driver.h"

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint16_t
eth_dev_find_free_port(void)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG_LINE(ERR, "Zero length Ethernet device name");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethernet device name is too long");
		return NULL;
	}

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethernet device with name %s already allocated",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Reached maximum number of Ethernet ports");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	eth_dev->flow_fp_ops = &rte_flow_fp_default_ops;
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	eth_dev_shared_data->allocated_ports++;

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return eth_dev;
}
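
/*
 * Usage sketch (illustrative, not part of this file): a primary-process
 * PMD probe path typically allocates the port, fills in its ops and
 * private data, then announces it; "net_dummy" and my_dev_ops are
 * hypothetical names.
 *
 *	struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_dummy");
 *
 *	if (eth_dev == NULL)
 *		return -ENODEV;
 *	eth_dev->dev_ops = &my_dev_ops;
 *	rte_eth_dev_probing_finish(eth_dev);
 */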

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ethdev = eth_dev_allocated(name);
	else
		ethdev = NULL;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return ethdev;
}

/*
 * Attach to a port already registered by the primary process, so that
 * the same device gets the same port ID in both the primary and
 * secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Device %s is not driven by the primary process",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	return eth_dev;
}

int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}
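
/*
 * Usage sketch (illustrative, not part of this file): the callbacks run
 * here are those registered through the public API, e.g.
 *
 *	static int
 *	lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
 *	       void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(event);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("link status changed on port %u\n", port_id);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_cb, NULL);
 *
 * and a driver reports the event with:
 *
 *	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 */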

void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * For a secondary process, the device is expected to be already
	 * 'usable' at this point, so shared data and all fast-path devops
	 * function pointers must have been set up properly inside
	 * rte_eth_dev.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

	dev->state = RTE_ETH_DEV_ATTACHED;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	int ret;

	if (eth_dev == NULL)
		return -EINVAL;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	if (eth_dev_shared_data_prepare() == NULL)
		ret = -EINVAL;
	else
		ret = 0;
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	if (ret != 0)
		return ret;

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	eth_dev->flow_fp_ops = &rte_flow_fp_default_ops;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
		eth_dev->data = NULL;

		eth_dev_shared_data->allocated_ports--;
		eth_dev_shared_data_release();
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return 0;
}
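
/*
 * Usage sketch (illustrative, not part of this file): a PMD remove path
 * usually looks up its port and releases it; my_dev_uninit is a
 * hypothetical driver helper.
 *
 *	struct rte_eth_dev *eth_dev = rte_eth_dev_allocated("net_dummy");
 *
 *	if (eth_dev != NULL) {
 *		my_dev_uninit(eth_dev);
 *		rte_eth_dev_release_port(eth_dev);
 *	}
 */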

int
rte_eth_dev_create(struct rte_device *device, const char *name,
	size_t priv_data_size,
	ethdev_bus_specific_init ethdev_bus_specific_init,
	void *bus_init_params,
	ethdev_init_t ethdev_init, void *init_params)
{
	struct rte_eth_dev *ethdev;
	int retval;

	if (*ethdev_init == NULL)
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ethdev = rte_eth_dev_allocate(name);
		if (!ethdev)
			return -ENODEV;

		if (priv_data_size) {
			ethdev->data->dev_private = rte_zmalloc_socket(
				name, priv_data_size, RTE_CACHE_LINE_SIZE,
				device->numa_node);

			if (!ethdev->data->dev_private) {
				RTE_ETHDEV_LOG_LINE(ERR,
					"failed to allocate private data");
				retval = -ENOMEM;
				goto probe_failed;
			}
		}
	} else {
		ethdev = rte_eth_dev_attach_secondary(name);
		if (!ethdev) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"secondary process attach failed, ethdev doesn't exist");
			return -ENODEV;
		}
	}

	ethdev->device = device;

	if (ethdev_bus_specific_init) {
		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
		if (retval) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"ethdev bus specific initialisation failed");
			goto probe_failed;
		}
	}

	retval = ethdev_init(ethdev, init_params);
	if (retval) {
		RTE_ETHDEV_LOG_LINE(ERR, "ethdev initialisation failed");
		goto probe_failed;
	}

	rte_eth_dev_probing_finish(ethdev);

	return retval;

probe_failed:
	rte_eth_dev_release_port(ethdev);
	return retval;
}
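
/*
 * Usage sketch (illustrative, not part of this file): a bus probe handler
 * can create the ethdev in one call; struct my_priv, my_dev_ops and
 * my_dev_init are hypothetical.
 *
 *	static int
 *	my_dev_init(struct rte_eth_dev *eth_dev, void *init_params)
 *	{
 *		RTE_SET_USED(init_params);
 *		eth_dev->dev_ops = &my_dev_ops;
 *		return 0;
 *	}
 *
 *	ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 *				 sizeof(struct my_priv), NULL, NULL,
 *				 my_dev_init, NULL);
 */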

int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
	ethdev_uninit_t ethdev_uninit)
{
	int ret;

	ethdev = rte_eth_dev_allocated(ethdev->data->name);
	if (!ethdev)
		return -ENODEV;

	if (*ethdev_uninit == NULL)
		return -EINVAL;

	ret = ethdev_uninit(ethdev);
	if (ret)
		return ret;

	return rte_eth_dev_release_port(ethdev);
}

struct rte_eth_dev *
rte_eth_dev_get_by_name(const char *name)
{
	uint16_t pid;

	if (rte_eth_dev_get_port_by_name(name, &pid))
		return NULL;

	return &rte_eth_devices[pid];
}

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port %u must be stopped to allow reset",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fallthrough */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']') {
				/* For devargs with a single list, move to state 2 once the
				 * letter becomes ']', so that each list can be treated as a
				 * separate key/value pair. But in the nested-list case, e.g.
				 * multiple representors as in [pf[0-3],pfvf[3,4-6]], the
				 * complete nested list must be treated as one pair value,
				 * hence check whether the ']' closing the outer list has
				 * been reached, otherwise stay in state 3.
				 */
				if ((strcmp("representor", pair->key) == 0) &&
				    (*(letter + 1) != '\0' && *(letter + 2) != '\0' &&
				     *(letter + 3) != '\0')			    &&
				    ((*(letter + 2) == 'p' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 'v' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 's' && *(letter + 3) == 'f')   ||
				     (*(letter + 2) == 'c' && isdigit(*(letter + 3))) ||
				     (*(letter + 2) == '[' && isdigit(*(letter + 3))) ||
				     (isdigit(*(letter + 2)))))
					state = 3;
				else
					state = 2;
			} else if (*letter == '\0') {
				return -EINVAL;
			}
			break;
		}
		letter++;
	}
}
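
/*
 * Example (illustrative, not part of this file): given the devargs string
 *
 *	"representor=[pf[0-1],vf[2-3]],foo=bar"
 *
 * the tokeniser yields two key/value pairs:
 *
 *	pairs[0]: key = "representor", value = "[pf[0-1],vf[2-3]]"
 *	pairs[1]: key = "foo",         value = "bar"
 *
 * i.e. a nested representor list is kept intact as a single value.
 */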

static int
devargs_parse_representor_ports(struct rte_eth_devargs *eth_devargs,
				char *da_val, unsigned int da_idx,
				unsigned int nb_da)
{
	struct rte_eth_devargs *eth_da;
	int result = 0;

	if (da_idx + 1 > nb_da) {
		RTE_ETHDEV_LOG_LINE(ERR, "Devargs parsed %u > max array size %u",
			       da_idx + 1, nb_da);
		result = -1;
		goto parse_cleanup;
	}
	eth_da = &eth_devargs[da_idx];
	memset(eth_da, 0, sizeof(*eth_da));
	RTE_ETHDEV_LOG_LINE(DEBUG, "	  Devargs idx %u value %s", da_idx, da_val);
	result = rte_eth_devargs_parse_representor_ports(da_val, eth_da);

parse_cleanup:
	return result;
}

static int
eth_dev_tokenise_representor_list(char *p_val, struct rte_eth_devargs *eth_devargs,
				  unsigned int nb_da)
{
	char da_val[BUFSIZ], str[BUFSIZ];
	bool is_rep_portid_list = true;
	unsigned int devargs = 0;
	int result = 0, len = 0;
	int i = 0, j = 0;
	char *pos;

	pos = p_val;
	/* Compute the length of the consolidated list. */
	while (*pos++ != '\0') {
		len++;
		if (isalpha(*pos))
			is_rep_portid_list = false;
	}

	/* A plain list of representor port IDs, e.g. [1,2,3], is treated as
	 * the single-representor case.
	 */
	if (is_rep_portid_list) {
		result = devargs_parse_representor_ports(eth_devargs, p_val, 0, 1);
		if (result < 0)
			return result;

		devargs++;
		return devargs;
	}

	memset(str, 0, BUFSIZ);
	memset(da_val, 0, BUFSIZ);
	/* Remove the exterior [] of the consolidated list */
	strncpy(str, &p_val[1], len - 2);
	while (1) {
		if (str[i] == '\0') {
			if (da_val[0] != '\0') {
				result = devargs_parse_representor_ports(eth_devargs, da_val,
									 devargs, nb_da);
				if (result < 0)
					goto parse_cleanup;

				devargs++;
			}
			break;
		}
		if (str[i] == ',' || str[i] == '[') {
			if (str[i] == ',') {
				if (da_val[0] != '\0') {
					da_val[j + 1] = '\0';
					result = devargs_parse_representor_ports(eth_devargs,
										 da_val, devargs,
										 nb_da);
					if (result < 0)
						goto parse_cleanup;

					devargs++;
					j = 0;
					memset(da_val, 0, BUFSIZ);
				}
			}

			if (str[i] == '[') {
				while (str[i] != ']' || isalpha(str[i + 1])) {
					da_val[j] = str[i];
					j++;
					i++;
				}
				da_val[j] = ']';
				da_val[j + 1] = '\0';
				result = devargs_parse_representor_ports(eth_devargs, da_val,
									 devargs, nb_da);
				if (result < 0)
					goto parse_cleanup;

				devargs++;
				j = 0;
				memset(da_val, 0, BUFSIZ);
			}
		} else {
			da_val[j] = str[i];
			j++;
		}
		i++;
	}
	result = devargs;

parse_cleanup:
	return result;
}
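
/*
 * Example (illustrative, not part of this file): a consolidated value of
 *
 *	"[pf[0-3],pfvf[3,4-6]]"
 *
 * is split into the per-representor strings "pf[0-3]" and "pfvf[3,4-6]",
 * each handed to devargs_parse_representor_ports() and filling one
 * struct rte_eth_devargs slot.
 */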

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_devargs,
		      unsigned int nb_da)
{
	struct rte_kvargs_pair *pair;
	struct rte_kvargs args;
	bool dup_rep = false;
	int devargs = 0;
	unsigned int i;
	int result = 0;

	memset(eth_devargs, 0, nb_da * sizeof(*eth_devargs));

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (dup_rep) {
				RTE_ETHDEV_LOG_LINE(ERR, "Duplicated representor key: %s",
						    pair->value);
				result = -1;
				goto parse_cleanup;
			}

			RTE_ETHDEV_LOG_LINE(DEBUG, "Devarg pattern: %s", pair->value);
			if (pair->value[0] == '[') {
				/* Multiple representor list case */
				devargs = eth_dev_tokenise_representor_list(pair->value,
									    eth_devargs, nb_da);
				if (devargs < 0)
					goto parse_cleanup;
			} else {
				/* Single representor case */
				devargs = devargs_parse_representor_ports(eth_devargs, pair->value,
									  0, 1);
				if (devargs < 0)
					goto parse_cleanup;
				devargs++;
			}
			dup_rep = true;
		}
	}
	RTE_ETHDEV_LOG_LINE(DEBUG, "Total devargs parsed %d", devargs);
	result = devargs;

parse_cleanup:
	free(args.str);

	return result;
}
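
/*
 * Usage sketch (illustrative, not part of this file): a PMD parses its
 * device arguments into an array of struct rte_eth_devargs; on success
 * the return value is the number of entries filled.
 *
 *	struct rte_eth_devargs eth_da[RTE_MAX_ETHPORTS];
 *	int n;
 *
 *	n = rte_eth_devargs_parse("representor=[pf[0-1],vf[2-3]]",
 *				  eth_da, RTE_DIM(eth_da));
 *	if (n < 0)
 *		return n;
 */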

static inline int
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
		const char *ring_name)
{
	return snprintf(name, len, "eth_p%d_q%d_%s",
			port_id, queue_id, ring_name);
}

int
rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
		uint16_t queue_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc = 0;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		return -ENAMETOOLONG;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz)
		rc = rte_memzone_free(mz);
	else
		rc = -ENOENT;

	return rc;
}

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned int align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz) {
		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
				size > mz->len ||
				((uintptr_t)mz->addr & (align - 1)) != 0) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"memzone %s does not satisfy the requested attributes",
				mz->name);
			return NULL;
		}

		return mz;
	}

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}
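
/*
 * Usage sketch (illustrative, not part of this file): a PMD typically
 * reserves the descriptor ring for a queue from its rx_queue_setup
 * callback; ring_size and rxq are hypothetical.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, ring_size,
 *				      RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 *	rxq->ring_iova = mz->iova;
 *	rxq->ring_addr = mz->addr;
 */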

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_bind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_unbind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	if (*dev->dev_ops->hairpin_queue_peer_update == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset)
{
	static const struct rte_mbuf_dynfield field_desc = {
		.name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME,
		.size = sizeof(rte_eth_ip_reassembly_dynfield_t),
		.align = alignof(rte_eth_ip_reassembly_dynfield_t),
	};
	static const struct rte_mbuf_dynflag ip_reassembly_dynflag = {
		.name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME,
	};
	int offset;

	offset = rte_mbuf_dynfield_register(&field_desc);
	if (offset < 0)
		return -1;
	if (field_offset != NULL)
		*field_offset = offset;

	offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag);
	if (offset < 0)
		return -1;
	if (flag_offset != NULL)
		*flag_offset = offset;

	return 0;
}

uint16_t
rte_eth_pkt_burst_dummy(void *queue __rte_unused,
		struct rte_mbuf **pkts __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_ETHDEV_LOG_LINE(WARNING, "Port %hu invalid representor ID Range %u - %u, entry %u",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}
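
/*
 * Usage sketch (illustrative, not part of this file): resolving the
 * ethdev representor ID of VF representor 2, letting controller and PF
 * default to the backing device's values (-1):
 *
 *	uint16_t repr_id;
 *	int ret;
 *
 *	ret = rte_eth_representor_id_get(port_id, RTE_ETH_REPRESENTOR_VF,
 *					 -1, -1, 2, &repr_id);
 *	if (ret == 0)
 *		RTE_ETHDEV_LOG_LINE(DEBUG, "representor ID %u", repr_id);
 */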

int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	uint16_t i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (eth_dev_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			eth_dev_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (eth_dev_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}
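
/*
 * Usage sketch (illustrative, not part of this file): a PMD that spawns
 * representors allocates one switch domain per backing device at probe
 * time, advertises it through dev_info->switch_info.domain_id, and frees
 * it on remove.
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	...
 *	rte_eth_switch_domain_free(domain_id);
 */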