xref: /dpdk/lib/ethdev/ethdev_driver.c (revision 09442498ef736d0a96632cf8b8c15d8ca78a6468)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <stdlib.h>

#include <rte_kvargs.h>
#include <rte_malloc.h>

#include "ethdev_driver.h"
#include "ethdev_private.h"

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. The array is sized to
 * RTE_MAX_ETHPORTS elements because there cannot be more active switch
 * domains than ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Use the shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}
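
/*
 * Example (editor's sketch, not part of the original file): how a PMD
 * probe path might use the allocator above. The device name "net_null0"
 * and struct pmd_internals are illustrative names only.
 *
 *	struct rte_eth_dev *dev;
 *
 *	dev = rte_eth_dev_allocate("net_null0");
 *	if (dev == NULL)
 *		return -ENODEV;	// name already taken or no free port
 *	dev->data->dev_private = rte_zmalloc_socket("net_null0",
 *			sizeof(struct pmd_internals),
 *			RTE_CACHE_LINE_SIZE, rte_socket_id());
 */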

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port ID in both the
 * primary and secondary processes.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment with port creation and release in the primary process. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}
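
/*
 * Example (editor's sketch): an application-side callback that the loop
 * above would invoke for link state changes. link_event_cb is an
 * illustrative name; the registration call is the public
 * rte_eth_dev_callback_register() API.
 *
 *	static int
 *	link_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *			link_event_cb, NULL);
 */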

void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * For a secondary process, the device is expected to be already
	 * 'usable' at this point, so shared data and all function pointers
	 * for fast-path devops must have been set up properly inside
	 * rte_eth_dev.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

	dev->state = RTE_ETH_DEV_ATTACHED;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_create(struct rte_device *device, const char *name,
	size_t priv_data_size,
	ethdev_bus_specific_init ethdev_bus_specific_init,
	void *bus_init_params,
	ethdev_init_t ethdev_init, void *init_params)
{
	struct rte_eth_dev *ethdev;
	int retval;

	if (*ethdev_init == NULL)
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ethdev = rte_eth_dev_allocate(name);
		if (ethdev == NULL)
			return -ENODEV;

		if (priv_data_size) {
			ethdev->data->dev_private = rte_zmalloc_socket(
				name, priv_data_size, RTE_CACHE_LINE_SIZE,
				device->numa_node);

			if (ethdev->data->dev_private == NULL) {
				RTE_ETHDEV_LOG(ERR,
					"failed to allocate private data\n");
				retval = -ENOMEM;
				goto probe_failed;
			}
		}
	} else {
		ethdev = rte_eth_dev_attach_secondary(name);
		if (ethdev == NULL) {
			RTE_ETHDEV_LOG(ERR,
				"secondary process attach failed, ethdev doesn't exist\n");
			return -ENODEV;
		}
	}

	ethdev->device = device;

	if (ethdev_bus_specific_init) {
		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
		if (retval) {
			RTE_ETHDEV_LOG(ERR,
				"ethdev bus specific initialisation failed\n");
			goto probe_failed;
		}
	}

	retval = ethdev_init(ethdev, init_params);
	if (retval) {
		RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
		goto probe_failed;
	}

	rte_eth_dev_probing_finish(ethdev);

	return retval;

probe_failed:
	rte_eth_dev_release_port(ethdev);
	return retval;
}
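
/*
 * Example (editor's sketch): a minimal vdev probe routine built on
 * rte_eth_dev_create(). struct pmd_internals, pmd_eth_dev_ops and
 * eth_dev_init() are hypothetical PMD-side names.
 *
 *	static int
 *	eth_dev_init(struct rte_eth_dev *ethdev, void *init_params)
 *	{
 *		RTE_SET_USED(init_params);
 *		ethdev->dev_ops = &pmd_eth_dev_ops;
 *		return 0;
 *	}
 *
 *	static int
 *	pmd_probe(struct rte_vdev_device *vdev)
 *	{
 *		return rte_eth_dev_create(&vdev->device,
 *				rte_vdev_device_name(vdev),
 *				sizeof(struct pmd_internals),
 *				NULL, NULL, eth_dev_init, NULL);
 *	}
 */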

int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
	ethdev_uninit_t ethdev_uninit)
{
	int ret;

	ethdev = rte_eth_dev_allocated(ethdev->data->name);
	if (ethdev == NULL)
		return -ENODEV;

	if (*ethdev_uninit == NULL)
		return -EINVAL;

	ret = ethdev_uninit(ethdev);
	if (ret)
		return ret;

	return rte_eth_dev_release_port(ethdev);
}
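
/*
 * Example (editor's sketch): the matching vdev remove path for the probe
 * sketch above; pmd_remove() and eth_dev_uninit() are hypothetical.
 *
 *	static int
 *	pmd_remove(struct rte_vdev_device *vdev)
 *	{
 *		struct rte_eth_dev *ethdev;
 *
 *		ethdev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
 *		if (ethdev == NULL)
 *			return 0;	// port already released
 *		return rte_eth_dev_destroy(ethdev, eth_dev_uninit);
 *	}
 */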

struct rte_eth_dev *
rte_eth_dev_get_by_name(const char *name)
{
	uint16_t pid;

	if (rte_eth_dev_get_port_by_name(name, &pid))
		return NULL;

	return &rte_eth_devices[pid];
}

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fallthrough */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']')
				state = 2;
			else if (*letter == '\0')
				return -EINVAL;
			break;
		}
		letter++;
	}
}
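
/*
 * Worked example (editor's note): for the input
 * "representor=[0,2],foo=bar" the state machine above yields two pairs:
 * key "representor" with value "[0,2]" (state 3 keeps the bracketed list
 * intact) and key "foo" with value "bar". Separators are overwritten
 * with '\0' in place, so both pairs point into the strdup()'d copy. At
 * the end of the string, the letter-- in state 2 makes the loop
 * re-examine the terminating '\0' in state 0, which returns 0.
 */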

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
	struct rte_kvargs args;
	struct rte_kvargs_pair *pair;
	unsigned int i;
	int result = 0;

	memset(eth_da, 0, sizeof(*eth_da));

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
				RTE_LOG(ERR, EAL, "duplicate representor key: %s\n",
					dargs);
				result = -1;
				goto parse_cleanup;
			}
			result = rte_eth_devargs_parse_representor_ports(
					pair->value, eth_da);
			if (result < 0)
				goto parse_cleanup;
		}
	}

parse_cleanup:
	free(args.str);

	return result;
}
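
/*
 * Example (editor's sketch): parsing a representor devargs string; the
 * input value and the expected result are illustrative.
 *
 *	struct rte_eth_devargs da;
 *	int ret;
 *
 *	ret = rte_eth_devargs_parse("representor=[0-3]", &da);
 *	if (ret < 0)
 *		return ret;
 *	// expects da.nb_representor_ports == 4,
 *	// da.representor_ports == {0, 1, 2, 3}
 */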

static inline int
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
		const char *ring_name)
{
	return snprintf(name, len, "eth_p%d_q%d_%s",
			port_id, queue_id, ring_name);
}

int
rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
		uint16_t queue_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc = 0;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
		return -ENAMETOOLONG;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz)
		rc = rte_memzone_free(mz);
	else
		rc = -ENOENT;

	return rc;
}

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned int align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz) {
		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
				size > mz->len ||
				((uintptr_t)mz->addr & (align - 1)) != 0) {
			RTE_ETHDEV_LOG(ERR,
				"memzone %s does not satisfy the requested attributes\n",
				mz->name);
			return NULL;
		}

		return mz;
	}

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}
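
/*
 * Example (editor's sketch): a PMD reserving descriptor ring memory for
 * an Rx queue during queue setup and releasing it on queue release;
 * ring_size and queue_idx are illustrative.
 *
 *	const struct rte_memzone *rz;
 *
 *	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 *			ring_size, RTE_CACHE_LINE_SIZE,
 *			dev->data->numa_node);
 *	if (rz == NULL)
 *		return -ENOMEM;
 *	...
 *	rte_eth_dma_zone_free(dev, "rx_ring", queue_idx);
 */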

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check port validity again; the caller already did. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_bind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check port validity again; the caller already did. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_unbind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory, but peer info is. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check port validity again; the caller already did. */
	dev = &rte_eth_devices[peer_port];
	if (*dev->dev_ops->hairpin_queue_peer_update == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}
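
/*
 * Example (editor's sketch): inside a PMD's hairpin bind implementation,
 * local queue information is first pushed to the peer and the peer queue
 * is then bound. `direction` follows the convention documented for these
 * callbacks in ethdev_driver.h; all names here are illustrative.
 *
 *	ret = rte_eth_hairpin_queue_peer_update(peer_port, peer_queue,
 *			&cur_info, &peer_info, direction);
 *	if (ret == 0)
 *		ret = rte_eth_hairpin_queue_peer_bind(cur_port, cur_queue,
 *				&peer_info, direction);
 */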

int
rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset)
{
	static const struct rte_mbuf_dynfield field_desc = {
		.name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME,
		.size = sizeof(rte_eth_ip_reassembly_dynfield_t),
		.align = __alignof__(rte_eth_ip_reassembly_dynfield_t),
	};
	static const struct rte_mbuf_dynflag ip_reassembly_dynflag = {
		.name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME,
	};
	int offset;

	offset = rte_mbuf_dynfield_register(&field_desc);
	if (offset < 0)
		return -1;
	if (field_offset != NULL)
		*field_offset = offset;

	offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag);
	if (offset < 0)
		return -1;
	if (flag_offset != NULL)
		*flag_offset = offset;

	return 0;
}
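
/*
 * Example (editor's sketch): a PMD registering the dynfield/dynflag at
 * configure time and later tagging an incompletely reassembled mbuf;
 * `priv` is a hypothetical private-data struct holding the offsets.
 *
 *	rte_eth_ip_reassembly_dynfield_t *dynfield;
 *
 *	if (rte_eth_ip_reassembly_dynfield_register(&priv->field_off,
 *			&priv->flag_off) < 0)
 *		return -1;
 *	...
 *	mbuf->ol_flags |= RTE_BIT64(priv->flag_off);
 *	dynfield = RTE_MBUF_DYNFIELD(mbuf, priv->field_off,
 *			rte_eth_ip_reassembly_dynfield_t *);
 */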

uint16_t
rte_eth_pkt_burst_dummy(void *queue __rte_unused,
		struct rte_mbuf **pkts __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}
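
/*
 * Example (editor's sketch): before releasing queues, a PMD can park its
 * fast-path pointers on the dummy burst function so that concurrent
 * rx/tx burst callers safely receive zero packets.
 *
 *	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
 *	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
 */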

int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to values from the device info. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_LOG(WARNING, EAL, "Port %hu: invalid representor ID range %u - %u, entry %u\n",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}
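
/*
 * Example (editor's sketch): a PMD resolving the exported representor ID
 * for a VF representor being probed; passing -1 lets controller and pf
 * default to the backing device's values. backer_port_id and vf_num are
 * illustrative names.
 *
 *	uint16_t repr_id;
 *
 *	ret = rte_eth_representor_id_get(backer_port_id,
 *			RTE_ETH_REPRESENTOR_VF, -1, -1, vf_num, &repr_id);
 *	if (ret == 0)
 *		ethdev->data->representor_id = repr_id;
 */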

int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	uint16_t i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (eth_dev_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			eth_dev_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (eth_dev_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}
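
/*
 * Example (editor's sketch): a PMD allocates one switch domain for a
 * device and its representors at probe time and releases it when the
 * last port of that device is closed.
 *
 *	uint16_t domain_id;
 *
 *	ret = rte_eth_switch_domain_alloc(&domain_id);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	ret = rte_eth_switch_domain_free(domain_id);
 */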