xref: /dpdk/lib/ethdev/ethdev_driver.c (revision 60531a2c53f4d2b4b96ebb10ca813f62d0a5508d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2022 Intel Corporation
3  */
4 
5 #include <stdlib.h>
6 #include <pthread.h>
7 
8 #include <rte_kvargs.h>
9 #include <rte_malloc.h>
10 
11 #include "ethdev_driver.h"
12 #include "ethdev_private.h"
13 
14 /**
15  * A set of values to describe the possible states of a switch domain.
16  */
17 enum rte_eth_switch_domain_state {
18 	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
19 	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
20 };
21 
22 /**
23  * Array of switch domains available for allocation. Array is sized to
24  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
25  * ethdev ports in a single process.
26  */
27 static struct rte_eth_dev_switch {
28 	enum rte_eth_switch_domain_state state;
29 } eth_dev_switch_domains[RTE_MAX_ETHPORTS];
30 
31 static struct rte_eth_dev *
32 eth_dev_allocated(const char *name)
33 {
34 	uint16_t i;
35 
36 	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);
37 
38 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
39 		if (rte_eth_devices[i].data != NULL &&
40 		    strcmp(rte_eth_devices[i].data->name, name) == 0)
41 			return &rte_eth_devices[i];
42 	}
43 	return NULL;
44 }
45 
46 static uint16_t
47 eth_dev_find_free_port(void)
48 	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
49 {
50 	uint16_t i;
51 
52 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
53 		/* Using shared name field to find a free port. */
54 		if (eth_dev_shared_data->data[i].name[0] == '\0') {
55 			RTE_ASSERT(rte_eth_devices[i].state ==
56 				   RTE_ETH_DEV_UNUSED);
57 			return i;
58 		}
59 	}
60 	return RTE_MAX_ETHPORTS;
61 }
62 
63 static struct rte_eth_dev *
64 eth_dev_get(uint16_t port_id)
65 	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
66 {
67 	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
68 
69 	eth_dev->data = &eth_dev_shared_data->data[port_id];
70 
71 	return eth_dev;
72 }
73 
74 struct rte_eth_dev *
75 rte_eth_dev_allocate(const char *name)
76 {
77 	uint16_t port_id;
78 	struct rte_eth_dev *eth_dev = NULL;
79 	size_t name_len;
80 
81 	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
82 	if (name_len == 0) {
83 		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
84 		return NULL;
85 	}
86 
87 	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
88 		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
89 		return NULL;
90 	}
91 
92 	/* Synchronize port creation between primary and secondary processes. */
93 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
94 
95 	if (eth_dev_shared_data_prepare() == NULL)
96 		goto unlock;
97 
98 	if (eth_dev_allocated(name) != NULL) {
99 		RTE_ETHDEV_LOG(ERR,
100 			"Ethernet device with name %s already allocated\n",
101 			name);
102 		goto unlock;
103 	}
104 
105 	port_id = eth_dev_find_free_port();
106 	if (port_id == RTE_MAX_ETHPORTS) {
107 		RTE_ETHDEV_LOG(ERR,
108 			"Reached maximum number of Ethernet ports\n");
109 		goto unlock;
110 	}
111 
112 	eth_dev = eth_dev_get(port_id);
113 	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
114 	eth_dev->data->port_id = port_id;
115 	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
116 	eth_dev->data->mtu = RTE_ETHER_MTU;
117 	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
118 	RTE_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
119 	eth_dev_shared_data->allocated_ports++;
120 
121 unlock:
122 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
123 
124 	return eth_dev;
125 }
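
/*
 * Editorial example (not part of the upstream file): a minimal sketch of how
 * a primary-process probe path might use rte_eth_dev_allocate() directly.
 * example_vdev_probe() and the ETHDEV_EDITORIAL_EXAMPLES guard are
 * hypothetical; a real PMD would also install dev_ops and burst functions
 * before calling rte_eth_dev_probing_finish().
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static int
example_vdev_probe(const char *name)
{
	struct rte_eth_dev *eth_dev;

	/* Reserve a port ID and a slot in the shared data (primary only). */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_dev->data->dev_private = rte_zmalloc(name, sizeof(uint64_t),
			RTE_CACHE_LINE_SIZE);
	if (eth_dev->data->dev_private == NULL) {
		rte_eth_dev_release_port(eth_dev);
		return -ENOMEM;
	}

	/* Expose the port to applications (emits RTE_ETH_EVENT_NEW). */
	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */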
126 
127 struct rte_eth_dev *
128 rte_eth_dev_allocated(const char *name)
129 {
130 	struct rte_eth_dev *ethdev;
131 
132 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
133 
134 	if (eth_dev_shared_data_prepare() != NULL)
135 		ethdev = eth_dev_allocated(name);
136 	else
137 		ethdev = NULL;
138 
139 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
140 
141 	return ethdev;
142 }
143 
144 /*
145  * Attach to a port already registered by the primary process, which
146  * ensures that the same device gets the same port ID in both the
147  * primary and secondary processes.
148  */
149 struct rte_eth_dev *
150 rte_eth_dev_attach_secondary(const char *name)
151 {
152 	uint16_t i;
153 	struct rte_eth_dev *eth_dev = NULL;
154 
155 	/* Synchronize port attachment to primary port creation and release. */
156 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
157 
158 	if (eth_dev_shared_data_prepare() == NULL)
159 		goto unlock;
160 
161 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
162 		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
163 			break;
164 	}
165 	if (i == RTE_MAX_ETHPORTS) {
166 		RTE_ETHDEV_LOG(ERR,
167 			"Device %s is not driven by the primary process\n",
168 			name);
169 	} else {
170 		eth_dev = eth_dev_get(i);
171 		RTE_ASSERT(eth_dev->data->port_id == i);
172 	}
173 
174 unlock:
175 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
176 	return eth_dev;
177 }
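
/*
 * Editorial example (not part of the upstream file): a sketch of the
 * corresponding secondary-process probe path. example_probe_secondary() is
 * hypothetical; a real PMD would install its actual burst functions instead
 * of the dummy placeholders used here.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static int
example_probe_secondary(const char *name)
{
	struct rte_eth_dev *eth_dev;

	/* Look up the port the primary process has already created. */
	eth_dev = rte_eth_dev_attach_secondary(name);
	if (eth_dev == NULL)
		return -ENODEV;

	/* Function pointers are per process and must be set up again here. */
	eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
	eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */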
178 
179 int
180 rte_eth_dev_callback_process(struct rte_eth_dev *dev,
181 	enum rte_eth_event_type event, void *ret_param)
182 {
183 	struct rte_eth_dev_callback *cb_lst;
184 	struct rte_eth_dev_callback dev_cb;
185 	int rc = 0;
186 
187 	rte_spinlock_lock(&eth_dev_cb_lock);
188 	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
189 		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
190 			continue;
191 		dev_cb = *cb_lst;
192 		cb_lst->active = 1;
193 		if (ret_param != NULL)
194 			dev_cb.ret_param = ret_param;
195 
196 		rte_spinlock_unlock(&eth_dev_cb_lock);
197 		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
198 				dev_cb.cb_arg, dev_cb.ret_param);
199 		rte_spinlock_lock(&eth_dev_cb_lock);
200 		cb_lst->active = 0;
201 	}
202 	rte_spinlock_unlock(&eth_dev_cb_lock);
203 	return rc;
204 }
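
/*
 * Editorial example (not part of the upstream file): how a driver's link
 * status interrupt handler might notify registered application callbacks.
 * example_link_intr_handler() is hypothetical.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static void
example_link_intr_handler(void *param)
{
	struct rte_eth_dev *dev = param;

	/* A real handler would refresh dev->data->dev_link first. */
	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */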
205 
206 void
207 rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
208 {
209 	if (dev == NULL)
210 		return;
211 
212 	/*
213 	 * For a secondary process, the device is expected to be already
214 	 * 'usable' at this point, so shared data and all function pointers
215 	 * for fast-path devops must have been set up properly inside rte_eth_dev.
216 	 */
217 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
218 		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);
219 
220 	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
221 
222 	dev->state = RTE_ETH_DEV_ATTACHED;
223 }
224 
225 int
226 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
227 {
228 	int ret;
229 
230 	if (eth_dev == NULL)
231 		return -EINVAL;
232 
233 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
234 	if (eth_dev_shared_data_prepare() == NULL)
235 		ret = -EINVAL;
236 	else
237 		ret = 0;
238 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
239 	if (ret != 0)
240 		return ret;
241 
242 	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
243 		rte_eth_dev_callback_process(eth_dev,
244 				RTE_ETH_EVENT_DESTROY, NULL);
245 
246 	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);
247 
248 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
249 
250 	eth_dev->state = RTE_ETH_DEV_UNUSED;
251 	eth_dev->device = NULL;
252 	eth_dev->process_private = NULL;
253 	eth_dev->intr_handle = NULL;
254 	eth_dev->rx_pkt_burst = NULL;
255 	eth_dev->tx_pkt_burst = NULL;
256 	eth_dev->tx_pkt_prepare = NULL;
257 	eth_dev->rx_queue_count = NULL;
258 	eth_dev->rx_descriptor_status = NULL;
259 	eth_dev->tx_descriptor_status = NULL;
260 	eth_dev->dev_ops = NULL;
261 
262 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
263 		rte_free(eth_dev->data->rx_queues);
264 		rte_free(eth_dev->data->tx_queues);
265 		rte_free(eth_dev->data->mac_addrs);
266 		rte_free(eth_dev->data->hash_mac_addrs);
267 		rte_free(eth_dev->data->dev_private);
268 		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
269 		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
270 		eth_dev->data = NULL;
271 
272 		eth_dev_shared_data->allocated_ports--;
273 		eth_dev_shared_data_release();
274 	}
275 
276 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
277 
278 	return 0;
279 }
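
/*
 * Editorial example (not part of the upstream file): a sketch of a driver
 * remove path that releases the ethdev port it allocated at probe time.
 * example_vdev_remove() is hypothetical.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static int
example_vdev_remove(const char *name)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0; /* port was never created or is already released */

	/* Driver-specific teardown (stop queues, free HW resources) here. */

	return rte_eth_dev_release_port(eth_dev);
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */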
280 
281 int
282 rte_eth_dev_create(struct rte_device *device, const char *name,
283 	size_t priv_data_size,
284 	ethdev_bus_specific_init ethdev_bus_specific_init,
285 	void *bus_init_params,
286 	ethdev_init_t ethdev_init, void *init_params)
287 {
288 	struct rte_eth_dev *ethdev;
289 	int retval;
290 
291 	if (*ethdev_init == NULL)
292 		return -EINVAL;
293 
294 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
295 		ethdev = rte_eth_dev_allocate(name);
296 		if (!ethdev)
297 			return -ENODEV;
298 
299 		if (priv_data_size) {
300 			ethdev->data->dev_private = rte_zmalloc_socket(
301 				name, priv_data_size, RTE_CACHE_LINE_SIZE,
302 				device->numa_node);
303 
304 			if (!ethdev->data->dev_private) {
305 				RTE_ETHDEV_LOG(ERR,
306 					"failed to allocate private data\n");
307 				retval = -ENOMEM;
308 				goto probe_failed;
309 			}
310 		}
311 	} else {
312 		ethdev = rte_eth_dev_attach_secondary(name);
313 		if (!ethdev) {
314 			RTE_ETHDEV_LOG(ERR,
315 				"secondary process attach failed, ethdev doesn't exist\n");
316 			return -ENODEV;
317 		}
318 	}
319 
320 	ethdev->device = device;
321 
322 	if (ethdev_bus_specific_init) {
323 		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
324 		if (retval) {
325 			RTE_ETHDEV_LOG(ERR,
326 				"ethdev bus specific initialisation failed\n");
327 			goto probe_failed;
328 		}
329 	}
330 
331 	retval = ethdev_init(ethdev, init_params);
332 	if (retval) {
333 		RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
334 		goto probe_failed;
335 	}
336 
337 	rte_eth_dev_probing_finish(ethdev);
338 
339 	return retval;
340 
341 probe_failed:
342 	rte_eth_dev_release_port(ethdev);
343 	return retval;
344 }
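
/*
 * Editorial example (not part of the upstream file): a sketch of a probe
 * routine built on rte_eth_dev_create(), which handles the primary/secondary
 * split shown above. struct example_priv, example_ethdev_init() and
 * example_generic_probe() are hypothetical.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
struct example_priv {
	uint64_t flags;
};

static int
example_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
{
	struct example_priv *priv = ethdev->data->dev_private;

	RTE_SET_USED(init_params);
	priv->flags = 0;
	/* A real driver would set ethdev->dev_ops, burst functions and
	 * MAC addresses here.
	 */
	return 0;
}

static int
example_generic_probe(struct rte_device *device, const char *name)
{
	/* Allocates (or attaches to) the port, allocates dev_private on the
	 * device's NUMA node, runs example_ethdev_init() and, on success,
	 * finishes probing.
	 */
	return rte_eth_dev_create(device, name, sizeof(struct example_priv),
			NULL, NULL, example_ethdev_init, NULL);
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */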
345 
346 int
347 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
348 	ethdev_uninit_t ethdev_uninit)
349 {
350 	int ret;
351 
352 	ethdev = rte_eth_dev_allocated(ethdev->data->name);
353 	if (!ethdev)
354 		return -ENODEV;
355 
356 	if (*ethdev_uninit == NULL)
357 		return -EINVAL;
358 
359 	ret = ethdev_uninit(ethdev);
360 	if (ret)
361 		return ret;
362 
363 	return rte_eth_dev_release_port(ethdev);
364 }
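
/*
 * Editorial example (not part of the upstream file): the matching remove
 * path using rte_eth_dev_destroy(). example_ethdev_uninit() and
 * example_generic_remove() are hypothetical.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static int
example_ethdev_uninit(struct rte_eth_dev *ethdev)
{
	/* Undo whatever the init callback did (free HW resources, etc.). */
	RTE_SET_USED(ethdev);
	return 0;
}

static int
example_generic_remove(const char *name)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(name);
	if (ethdev == NULL)
		return 0;

	/* Runs the uninit callback, then releases the port. */
	return rte_eth_dev_destroy(ethdev, example_ethdev_uninit);
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */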
365 
366 struct rte_eth_dev *
367 rte_eth_dev_get_by_name(const char *name)
368 {
369 	uint16_t pid;
370 
371 	if (rte_eth_dev_get_port_by_name(name, &pid))
372 		return NULL;
373 
374 	return &rte_eth_devices[pid];
375 }
376 
377 int
378 rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
379 {
380 	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
381 		return 1;
382 	return 0;
383 }
384 
385 int
386 rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
387 {
388 	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
389 		return 1;
390 	return 0;
391 }
392 
393 void
394 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
395 {
396 	if (dev->data->dev_started) {
397 		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
398 			dev->data->port_id);
399 		return;
400 	}
401 
402 	eth_dev_rx_queue_config(dev, 0);
403 	eth_dev_tx_queue_config(dev, 0);
404 
405 	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
406 }
407 
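/*
 * Split a devargs string such as "representor=[0-3],key=value" into
 * key/value pairs stored in @arglist. The input is duplicated into
 * arglist->str and split in place; a bracketed value ("[...]") is kept
 * as a single token. Returns 0 on success or a negative errno value on
 * allocation failure or malformed input. (Editorial comment added for
 * clarity; not part of the upstream file.)
 */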
408 static int
409 eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
410 {
411 	int state;
412 	struct rte_kvargs_pair *pair;
413 	char *letter;
414 
415 	arglist->str = strdup(str_in);
416 	if (arglist->str == NULL)
417 		return -ENOMEM;
418 
419 	letter = arglist->str;
420 	state = 0;
421 	arglist->count = 0;
422 	pair = &arglist->pairs[0];
423 	while (1) {
424 		switch (state) {
425 		case 0: /* Initial */
426 			if (*letter == '=')
427 				return -EINVAL;
428 			else if (*letter == '\0')
429 				return 0;
430 
431 			state = 1;
432 			pair->key = letter;
433 			/* fallthrough */
434 
435 		case 1: /* Parsing key */
436 			if (*letter == '=') {
437 				*letter = '\0';
438 				pair->value = letter + 1;
439 				state = 2;
440 			} else if (*letter == ',' || *letter == '\0')
441 				return -EINVAL;
442 			break;
443 
444 
445 		case 2: /* Parsing value */
446 			if (*letter == '[')
447 				state = 3;
448 			else if (*letter == ',') {
449 				*letter = '\0';
450 				arglist->count++;
451 				pair = &arglist->pairs[arglist->count];
452 				state = 0;
453 			} else if (*letter == '\0') {
454 				letter--;
455 				arglist->count++;
456 				pair = &arglist->pairs[arglist->count];
457 				state = 0;
458 			}
459 			break;
460 
461 		case 3: /* Parsing list */
462 			if (*letter == ']')
463 				state = 2;
464 			else if (*letter == '\0')
465 				return -EINVAL;
466 			break;
467 		}
468 		letter++;
469 	}
470 }
471 
472 int
473 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
474 {
475 	struct rte_kvargs args;
476 	struct rte_kvargs_pair *pair;
477 	unsigned int i;
478 	int result = 0;
479 
480 	memset(eth_da, 0, sizeof(*eth_da));
481 
482 	result = eth_dev_devargs_tokenise(&args, dargs);
483 	if (result < 0)
484 		goto parse_cleanup;
485 
486 	for (i = 0; i < args.count; i++) {
487 		pair = &args.pairs[i];
488 		if (strcmp("representor", pair->key) == 0) {
489 			if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
490 				RTE_LOG(ERR, EAL, "duplicated representor key: %s\n",
491 					dargs);
492 				result = -1;
493 				goto parse_cleanup;
494 			}
495 			result = rte_eth_devargs_parse_representor_ports(
496 					pair->value, eth_da);
497 			if (result < 0)
498 				goto parse_cleanup;
499 		}
500 	}
501 
502 parse_cleanup:
503 	free(args.str);
504 
505 	return result;
506 }
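
/*
 * Editorial example (not part of the upstream file): parsing a "representor"
 * devargs value. example_parse_representor_devargs() is hypothetical, and the
 * nb_representor_ports field is assumed to be the counter filled in by the
 * representor parser.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static int
example_parse_representor_devargs(void)
{
	struct rte_eth_devargs eth_da;
	int ret;

	/* "representor=[0-3]" requests representor ports 0..3. */
	ret = rte_eth_devargs_parse("representor=[0-3]", &eth_da);
	if (ret < 0)
		return ret;

	/* Expected to be 4 for the string above. */
	return eth_da.nb_representor_ports;
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */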
507 
508 static inline int
509 eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
510 		const char *ring_name)
511 {
512 	return snprintf(name, len, "eth_p%d_q%d_%s",
513 			port_id, queue_id, ring_name);
514 }
515 
516 int
517 rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
518 		uint16_t queue_id)
519 {
520 	char z_name[RTE_MEMZONE_NAMESIZE];
521 	const struct rte_memzone *mz;
522 	int rc = 0;
523 
524 	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
525 			queue_id, ring_name);
526 	if (rc >= RTE_MEMZONE_NAMESIZE) {
527 		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
528 		return -ENAMETOOLONG;
529 	}
530 
531 	mz = rte_memzone_lookup(z_name);
532 	if (mz)
533 		rc = rte_memzone_free(mz);
534 	else
535 		rc = -ENOENT;
536 
537 	return rc;
538 }
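
/*
 * Editorial example (not part of the upstream file): releasing a queue's
 * descriptor ring memzone from a queue-release handler. example_free_rx_ring()
 * and the "rx_ring" name are hypothetical; the name must match the one used
 * when the zone was reserved.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static void
example_free_rx_ring(const struct rte_eth_dev *dev, uint16_t queue_id)
{
	/* -ENOENT from the free is harmless if the zone was never reserved. */
	(void)rte_eth_dma_zone_free(dev, "rx_ring", queue_id);
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */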
539 
540 const struct rte_memzone *
541 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
542 			 uint16_t queue_id, size_t size, unsigned int align,
543 			 int socket_id)
544 {
545 	char z_name[RTE_MEMZONE_NAMESIZE];
546 	const struct rte_memzone *mz;
547 	int rc;
548 
549 	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
550 			queue_id, ring_name);
551 	if (rc >= RTE_MEMZONE_NAMESIZE) {
552 		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
553 		rte_errno = ENAMETOOLONG;
554 		return NULL;
555 	}
556 
557 	mz = rte_memzone_lookup(z_name);
558 	if (mz) {
559 		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
560 				size > mz->len ||
561 				((uintptr_t)mz->addr & (align - 1)) != 0) {
562 			RTE_ETHDEV_LOG(ERR,
563 				"memzone %s does not justify the requested attributes\n",
564 				mz->name);
565 			return NULL;
566 		}
567 
568 		return mz;
569 	}
570 
571 	return rte_memzone_reserve_aligned(z_name, size, socket_id,
572 			RTE_MEMZONE_IOVA_CONTIG, align);
573 }
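
/*
 * Editorial example (not part of the upstream file): reserving descriptor
 * ring memory from an Rx queue setup handler. example_alloc_rx_ring() and
 * the 16-byte descriptor size are hypothetical; the "rx_ring" name matches
 * the free example above.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static const struct rte_memzone *
example_alloc_rx_ring(struct rte_eth_dev *dev, uint16_t queue_id,
		uint16_t nb_desc, int socket_id)
{
	/* Reuses a compatible existing zone or reserves IOVA-contiguous
	 * memory named "eth_p<port>_q<queue>_rx_ring".
	 */
	return rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
			(size_t)nb_desc * 16, RTE_CACHE_LINE_SIZE, socket_id);
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */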
574 
575 int
576 rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
577 				struct rte_hairpin_peer_info *peer_info,
578 				uint32_t direction)
579 {
580 	struct rte_eth_dev *dev;
581 
582 	if (peer_info == NULL)
583 		return -EINVAL;
584 
585 	/* No need to check the validity again. */
586 	dev = &rte_eth_devices[cur_port];
587 	if (*dev->dev_ops->hairpin_queue_peer_bind == NULL)
588 		return -ENOTSUP;
589 
590 	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
591 							peer_info, direction);
592 }
593 
594 int
595 rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
596 				  uint32_t direction)
597 {
598 	struct rte_eth_dev *dev;
599 
600 	/* No need to check the validity again. */
601 	dev = &rte_eth_devices[cur_port];
602 	if (*dev->dev_ops->hairpin_queue_peer_unbind == NULL)
603 		return -ENOTSUP;
604 
605 	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
606 							  direction);
607 }
608 
609 int
610 rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
611 				  struct rte_hairpin_peer_info *cur_info,
612 				  struct rte_hairpin_peer_info *peer_info,
613 				  uint32_t direction)
614 {
615 	struct rte_eth_dev *dev;
616 
617 	/* Current queue information (cur_info) is optional, but peer_info is mandatory. */
618 	if (peer_info == NULL)
619 		return -EINVAL;
620 
621 	/* No need to check the validity again. */
622 	dev = &rte_eth_devices[peer_port];
623 	if (*dev->dev_ops->hairpin_queue_peer_update == NULL)
624 		return -ENOTSUP;
625 
626 	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
627 					cur_info, peer_info, direction);
628 }
629 
630 int
631 rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset)
632 {
633 	static const struct rte_mbuf_dynfield field_desc = {
634 		.name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME,
635 		.size = sizeof(rte_eth_ip_reassembly_dynfield_t),
636 		.align = __alignof__(rte_eth_ip_reassembly_dynfield_t),
637 	};
638 	static const struct rte_mbuf_dynflag ip_reassembly_dynflag = {
639 		.name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME,
640 	};
641 	int offset;
642 
643 	offset = rte_mbuf_dynfield_register(&field_desc);
644 	if (offset < 0)
645 		return -1;
646 	if (field_offset != NULL)
647 		*field_offset = offset;
648 
649 	offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag);
650 	if (offset < 0)
651 		return -1;
652 	if (flag_offset != NULL)
653 		*flag_offset = offset;
654 
655 	return 0;
656 }
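
/*
 * Editorial example (not part of the upstream file): a driver registering
 * the IP reassembly dynamic field and flag once, typically when the
 * corresponding offload is configured. The example_* names are hypothetical.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static int example_ip_reass_dynfield_offset = -1;
static int example_ip_reass_dynflag_offset = -1;

static int
example_enable_ip_reassembly(void)
{
	if (example_ip_reass_dynfield_offset >= 0)
		return 0; /* already registered */

	return rte_eth_ip_reassembly_dynfield_register(
			&example_ip_reass_dynfield_offset,
			&example_ip_reass_dynflag_offset);
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */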
657 
658 uint16_t
659 rte_eth_pkt_burst_dummy(void *queue __rte_unused,
660 		struct rte_mbuf **pkts __rte_unused,
661 		uint16_t nb_pkts __rte_unused)
662 {
663 	return 0;
664 }
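
/*
 * Editorial example (not part of the upstream file): parking the fast-path
 * entry points on the dummy burst function while a port is being torn down.
 * example_disable_datapath() is hypothetical.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static void
example_disable_datapath(struct rte_eth_dev *dev)
{
	/* The dummy burst functions simply report zero packets, giving the
	 * driver a safe placeholder for entry points it no longer services.
	 */
	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */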
665 
666 int
667 rte_eth_representor_id_get(uint16_t port_id,
668 			   enum rte_eth_representor_type type,
669 			   int controller, int pf, int representor_port,
670 			   uint16_t *repr_id)
671 {
672 	int ret, n, count;
673 	uint32_t i;
674 	struct rte_eth_representor_info *info = NULL;
675 	size_t size;
676 
677 	if (type == RTE_ETH_REPRESENTOR_NONE)
678 		return 0;
679 	if (repr_id == NULL)
680 		return -EINVAL;
681 
682 	/* Get PMD representor range info. */
683 	ret = rte_eth_representor_info_get(port_id, NULL);
684 	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
685 	    controller == -1 && pf == -1) {
686 		/* Direct mapping for legacy VF representor. */
687 		*repr_id = representor_port;
688 		return 0;
689 	} else if (ret < 0) {
690 		return ret;
691 	}
692 	n = ret;
693 	size = sizeof(*info) + n * sizeof(info->ranges[0]);
694 	info = calloc(1, size);
695 	if (info == NULL)
696 		return -ENOMEM;
697 	info->nb_ranges_alloc = n;
698 	ret = rte_eth_representor_info_get(port_id, info);
699 	if (ret < 0)
700 		goto out;
701 
702 	/* Default controller and pf to caller. */
703 	if (controller == -1)
704 		controller = info->controller;
705 	if (pf == -1)
706 		pf = info->pf;
707 
708 	/* Locate representor ID. */
709 	ret = -ENOENT;
710 	for (i = 0; i < info->nb_ranges; ++i) {
711 		if (info->ranges[i].type != type)
712 			continue;
713 		if (info->ranges[i].controller != controller)
714 			continue;
715 		if (info->ranges[i].id_end < info->ranges[i].id_base) {
716 			RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
717 				port_id, info->ranges[i].id_base,
718 				info->ranges[i].id_end, i);
719 			continue;
720 
721 		}
722 		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
723 		switch (info->ranges[i].type) {
724 		case RTE_ETH_REPRESENTOR_PF:
725 			if (pf < info->ranges[i].pf ||
726 			    pf >= info->ranges[i].pf + count)
727 				continue;
728 			*repr_id = info->ranges[i].id_base +
729 				   (pf - info->ranges[i].pf);
730 			ret = 0;
731 			goto out;
732 		case RTE_ETH_REPRESENTOR_VF:
733 			if (info->ranges[i].pf != pf)
734 				continue;
735 			if (representor_port < info->ranges[i].vf ||
736 			    representor_port >= info->ranges[i].vf + count)
737 				continue;
738 			*repr_id = info->ranges[i].id_base +
739 				   (representor_port - info->ranges[i].vf);
740 			ret = 0;
741 			goto out;
742 		case RTE_ETH_REPRESENTOR_SF:
743 			if (info->ranges[i].pf != pf)
744 				continue;
745 			if (representor_port < info->ranges[i].sf ||
746 			    representor_port >= info->ranges[i].sf + count)
747 				continue;
748 			*repr_id = info->ranges[i].id_base +
749 			      (representor_port - info->ranges[i].sf);
750 			ret = 0;
751 			goto out;
752 		default:
753 			break;
754 		}
755 	}
756 out:
757 	free(info);
758 	return ret;
759 }
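
/*
 * Editorial example (not part of the upstream file): converting a VF number
 * from devargs into the representor ID exposed by the backing port.
 * example_map_vf_representor() is hypothetical; passing -1 lets the
 * controller and PF default to the backing port's own values, as handled
 * above.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static int
example_map_vf_representor(uint16_t backer_port_id, int vf_num,
		uint16_t *repr_id)
{
	return rte_eth_representor_id_get(backer_port_id,
			RTE_ETH_REPRESENTOR_VF, -1, -1, vf_num, repr_id);
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */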
760 
761 int
762 rte_eth_switch_domain_alloc(uint16_t *domain_id)
763 {
764 	uint16_t i;
765 
766 	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
767 
768 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
769 		if (eth_dev_switch_domains[i].state ==
770 			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
771 			eth_dev_switch_domains[i].state =
772 				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
773 			*domain_id = i;
774 			return 0;
775 		}
776 	}
777 
778 	return -ENOSPC;
779 }
780 
781 int
782 rte_eth_switch_domain_free(uint16_t domain_id)
783 {
784 	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
785 		domain_id >= RTE_MAX_ETHPORTS)
786 		return -EINVAL;
787 
788 	if (eth_dev_switch_domains[domain_id].state !=
789 		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
790 		return -EINVAL;
791 
792 	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
793 
794 	return 0;
795 }
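
/*
 * Editorial example (not part of the upstream file): a PF driver allocating
 * one switch domain for itself and its representors at probe time and
 * releasing it on remove. The example_* names are hypothetical.
 */
#ifdef ETHDEV_EDITORIAL_EXAMPLES /* never defined; illustration only */
static uint16_t example_switch_domain_id =
		RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

static int
example_switch_domain_init(void)
{
	/* The PF and every representor port report this ID in their
	 * switch_info so applications can group them together.
	 */
	return rte_eth_switch_domain_alloc(&example_switch_domain_id);
}

static int
example_switch_domain_fini(void)
{
	int ret;

	ret = rte_eth_switch_domain_free(example_switch_domain_id);
	if (ret == 0)
		example_switch_domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	return ret;
}
#endif /* ETHDEV_EDITORIAL_EXAMPLES */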
796