/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <stdalign.h>
#include <stdlib.h>
#include <pthread.h>

#include <rte_kvargs.h>
#include <rte_malloc.h>

#include "ethdev_driver.h"
#include "ethdev_private.h"

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

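/*
 * Look up an ethdev by name across all port slots in this process.
 * Returns the matching device, or NULL if no allocated port uses the name.
 */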
static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

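/*
 * Scan the shared data for the first port slot whose name is empty,
 * which marks it as free. Returns the slot index, or RTE_MAX_ETHPORTS
 * if every slot is taken. Caller must hold the ethdev lock.
 */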
static uint16_t
eth_dev_find_free_port(void)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

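/*
 * Bind the per-process device entry for port_id to its slot in the
 * shared data area and return it. Caller must hold the ethdev lock.
 */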
static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

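/*
 * Allocate a new ethdev port: validate the name, take the first free
 * port slot and initialise its shared data with defaults (MTU, flow ops
 * mutex, an invalid backer port ID). Only the primary process may
 * allocate ports, as asserted below.
 */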
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG_LINE(ERR, "Zero length Ethernet device name");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethernet device name is too long");
		return NULL;
	}

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethernet device with name %s already allocated",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Reached maximum number of Ethernet ports");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	eth_dev_shared_data->allocated_ports++;

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return eth_dev;
}

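/*
 * Thread-safe public wrapper around eth_dev_allocated(): takes the
 * ethdev lock and ensures the shared data area is mapped first.
 */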
struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ethdev = eth_dev_allocated(name);
	else
		ethdev = NULL;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return ethdev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device has the same port ID in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL)
		goto unlock;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Device %s is not driven by the primary process",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

unlock:
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	return eth_dev;
}

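/*
 * Invoke every callback registered for @event on @dev. The callback
 * list lock is dropped around each invocation (a copy of the entry is
 * used) so that callbacks may themselves call ethdev APIs; the 'active'
 * flag marks an entry as currently running while the lock is released.
 */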
int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}

void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * For a secondary process, the device is expected to be already
	 * 'usable' at this point, so shared data and all function pointers
	 * for fast-path devops must be set up properly inside rte_eth_dev.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

	dev->state = RTE_ETH_DEV_ATTACHED;
}

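/*
 * Release a port back to the free pool: fire the DESTROY event, reset
 * the fast-path function pointers, clear all per-process fields, and
 * (in the primary process only) free the queue arrays, MAC tables and
 * private data before wiping the shared data slot.
 */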
int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	int ret;

	if (eth_dev == NULL)
		return -EINVAL;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	if (eth_dev_shared_data_prepare() == NULL)
		ret = -EINVAL;
	else
		ret = 0;
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
	if (ret != 0)
		return ret;

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
		eth_dev->data = NULL;

		eth_dev_shared_data->allocated_ports--;
		eth_dev_shared_data_release();
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return 0;
}

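/*
 * Generic helper used by bus drivers and PMDs to create an ethdev port:
 * in the primary process it allocates the port and optional private
 * data; in a secondary process it attaches to the existing port. The
 * bus-specific and driver init callbacks are then run, and the port is
 * released again on any failure.
 *
 * A minimal usage sketch from a hypothetical PMD probe callback (the
 * foo_* names are illustrative, not part of this file):
 *
 *	static int
 *	foo_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
 *	{
 *		// set ethdev->dev_ops, default MAC address, etc.
 *		return 0;
 *	}
 *
 *	// in the bus probe handler:
 *	// rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 *	//		sizeof(struct foo_priv), NULL, NULL,
 *	//		foo_ethdev_init, NULL);
 */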
int
rte_eth_dev_create(struct rte_device *device, const char *name,
	size_t priv_data_size,
	ethdev_bus_specific_init ethdev_bus_specific_init,
	void *bus_init_params,
	ethdev_init_t ethdev_init, void *init_params)
{
	struct rte_eth_dev *ethdev;
	int retval;

	if (*ethdev_init == NULL)
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ethdev = rte_eth_dev_allocate(name);
		if (!ethdev)
			return -ENODEV;

		if (priv_data_size) {
			ethdev->data->dev_private = rte_zmalloc_socket(
				name, priv_data_size, RTE_CACHE_LINE_SIZE,
				device->numa_node);

			if (!ethdev->data->dev_private) {
				RTE_ETHDEV_LOG_LINE(ERR,
					"failed to allocate private data");
				retval = -ENOMEM;
				goto probe_failed;
			}
		}
	} else {
		ethdev = rte_eth_dev_attach_secondary(name);
		if (!ethdev) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"secondary process attach failed, ethdev doesn't exist");
			return -ENODEV;
		}
	}

	ethdev->device = device;

	if (ethdev_bus_specific_init) {
		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
		if (retval) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"ethdev bus specific initialisation failed");
			goto probe_failed;
		}
	}

	retval = ethdev_init(ethdev, init_params);
	if (retval) {
		RTE_ETHDEV_LOG_LINE(ERR, "ethdev initialisation failed");
		goto probe_failed;
	}

	rte_eth_dev_probing_finish(ethdev);

	return retval;

probe_failed:
	rte_eth_dev_release_port(ethdev);
	return retval;
}

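/*
 * Counterpart of rte_eth_dev_create(): re-look up the port by name,
 * run the driver's uninit callback, then release the port.
 */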
int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
	ethdev_uninit_t ethdev_uninit)
{
	int ret;

	ethdev = rte_eth_dev_allocated(ethdev->data->name);
	if (!ethdev)
		return -ENODEV;

	if (*ethdev_uninit == NULL)
		return -EINVAL;

	ret = ethdev_uninit(ethdev);
	if (ret)
		return ret;

	return rte_eth_dev_release_port(ethdev);
}

struct rte_eth_dev *
rte_eth_dev_get_by_name(const char *name)
{
	uint16_t pid;

	if (rte_eth_dev_get_port_by_name(name, &pid))
		return NULL;

	return &rte_eth_devices[pid];
}

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port %u must be stopped to allow reset",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

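/*
 * Split a devargs string of the form "key1=val1,key2=[a,b,c],..." into
 * @arglist key/value pairs in place. A small state machine is used:
 * state 0 expects a key, state 1 scans the key up to '=', state 2 scans
 * the value up to ',' or end of string, and state 3 skips over a
 * bracketed list so that commas inside "[...]" do not split the value.
 * For example, "representor=[0-3],foo=bar" yields the pairs
 * ("representor", "[0-3]") and ("foo", "bar").
 */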
static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fallthrough */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']')
				state = 2;
			else if (*letter == '\0')
				return -EINVAL;
			break;
		}
		letter++;
	}
}

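/*
 * Parse an ethdev devargs string into @eth_da. Currently only the
 * "representor" key is recognised; duplicate keys are rejected and the
 * value is decoded by rte_eth_devargs_parse_representor_ports().
 */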
int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
	struct rte_kvargs args;
	struct rte_kvargs_pair *pair;
	unsigned int i;
	int result = 0;

	memset(eth_da, 0, sizeof(*eth_da));

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
				RTE_ETHDEV_LOG_LINE(ERR, "duplicated representor key: %s",
					dargs);
				result = -1;
				goto parse_cleanup;
			}
			result = rte_eth_devargs_parse_representor_ports(
					pair->value, eth_da);
			if (result < 0)
				goto parse_cleanup;
		}
	}

parse_cleanup:
	free(args.str);

	return result;
}

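/*
 * Build the canonical memzone name "eth_p<port>_q<queue>_<ring_name>"
 * shared by the DMA zone reserve/free helpers below. Returns the
 * snprintf() result so callers can detect truncation.
 */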
static inline int
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
		const char *ring_name)
{
	return snprintf(name, len, "eth_p%d_q%d_%s",
			port_id, queue_id, ring_name);
}

int
rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
		uint16_t queue_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc = 0;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		return -ENAMETOOLONG;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz)
		rc = rte_memzone_free(mz);
	else
		rc = -ENOENT;

	return rc;
}

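/*
 * Reserve (or reuse) an IOVA-contiguous memzone for a queue's HW rings.
 * If a zone with the derived name already exists, it is returned only
 * when it still matches the requested size, alignment and socket;
 * otherwise the request fails rather than handing back an unsuitable
 * zone.
 */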
const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned int align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz) {
		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
				size > mz->len ||
				((uintptr_t)mz->addr & (align - 1)) != 0) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"memzone %s does not satisfy the requested attributes",
				mz->name);
			return NULL;
		}

		return mz;
	}

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_bind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_unbind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory; peer info must be provided. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	if (*dev->dev_ops->hairpin_queue_peer_update == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

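/*
 * Register the mbuf dynamic field and dynamic flag used to report IP
 * reassembly results. Either output pointer may be NULL if the caller
 * does not need that offset.
 */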
int
rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset)
{
	static const struct rte_mbuf_dynfield field_desc = {
		.name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME,
		.size = sizeof(rte_eth_ip_reassembly_dynfield_t),
		.align = alignof(rte_eth_ip_reassembly_dynfield_t),
	};
	static const struct rte_mbuf_dynflag ip_reassembly_dynflag = {
		.name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME,
	};
	int offset;

	offset = rte_mbuf_dynfield_register(&field_desc);
	if (offset < 0)
		return -1;
	if (field_offset != NULL)
		*field_offset = offset;

	offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag);
	if (offset < 0)
		return -1;
	if (flag_offset != NULL)
		*flag_offset = offset;

	return 0;
}

uint16_t
rte_eth_pkt_burst_dummy(void *queue __rte_unused,
		struct rte_mbuf **pkts __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

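/*
 * Map a (controller, pf, representor_port) tuple of the given type to
 * the representor ID exposed by the PMD. Falls back to a direct
 * mapping for legacy VF representors when the PMD does not implement
 * representor info, and defaults controller/pf to the backing device's
 * values when they are passed as -1.
 */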
int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_ETHDEV_LOG_LINE(WARNING, "Port %hu invalid representor ID range %u - %u, entry %u",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}

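/*
 * Allocate the first unused switch domain ID from the static pool.
 * Domain IDs are process-local and bounded by RTE_MAX_ETHPORTS.
 */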
int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	uint16_t i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (eth_dev_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			eth_dev_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (eth_dev_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}
797