/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <rte_kvargs.h>
#include <rte_malloc.h>

#include "ethdev_driver.h"
#include "ethdev_private.h"

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

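/*
 * Illustrative sketch (the driver name "net_example" and the elided setup
 * steps are hypothetical): a primary-process PMD probe path typically uses
 * the allocator below as
 *
 *	struct rte_eth_dev *dev = rte_eth_dev_allocate("net_example");
 *
 *	if (dev == NULL)
 *		return -ENODEV;
 *	... fill dev->dev_ops, dev->data->mac_addrs, etc. ...
 *	rte_eth_dev_probing_finish(dev);
 */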
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port ID in both the
 * primary and the secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment with port creation and release
	 * in the primary process.
	 */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

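/*
 * Note on the locking pattern below: each callback entry is copied and
 * eth_dev_cb_lock is released before cb_fn is invoked, so a callback may
 * safely call back into ethdev; the 'active' flag marks an entry whose
 * callback is currently executing, so that unregistration code can detect
 * that the entry is still in use.
 */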
int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}

void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * For a secondary process, the device is expected to be already
	 * 'usable' at this point, so shared data and all function pointers
	 * for fast-path devops must be set up properly inside rte_eth_dev.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

	dev->state = RTE_ETH_DEV_ATTACHED;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

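/*
 * Illustrative sketch (all "example_*" names are hypothetical): a bus probe
 * callback would typically wrap rte_eth_dev_create() as
 *
 *	static int
 *	example_dev_init(struct rte_eth_dev *dev, void *init_params);
 *
 *	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
 *				  sizeof(struct example_private),
 *				  NULL, NULL, example_dev_init, NULL);
 *
 * On failure of either init stage the port is released via probe_failed.
 */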
int
rte_eth_dev_create(struct rte_device *device, const char *name,
	size_t priv_data_size,
	ethdev_bus_specific_init ethdev_bus_specific_init,
	void *bus_init_params,
	ethdev_init_t ethdev_init, void *init_params)
{
	struct rte_eth_dev *ethdev;
	int retval;

	RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ethdev = rte_eth_dev_allocate(name);
		if (!ethdev)
			return -ENODEV;

		if (priv_data_size) {
			ethdev->data->dev_private = rte_zmalloc_socket(
				name, priv_data_size, RTE_CACHE_LINE_SIZE,
				device->numa_node);

			if (!ethdev->data->dev_private) {
				RTE_ETHDEV_LOG(ERR,
					"failed to allocate private data\n");
				retval = -ENOMEM;
				goto probe_failed;
			}
		}
	} else {
		ethdev = rte_eth_dev_attach_secondary(name);
		if (!ethdev) {
			RTE_ETHDEV_LOG(ERR,
				"secondary process attach failed, ethdev doesn't exist\n");
			return -ENODEV;
		}
	}

	ethdev->device = device;

	if (ethdev_bus_specific_init) {
		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
		if (retval) {
			RTE_ETHDEV_LOG(ERR,
				"ethdev bus specific initialisation failed\n");
			goto probe_failed;
		}
	}

	retval = ethdev_init(ethdev, init_params);
	if (retval) {
		RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
		goto probe_failed;
	}

	rte_eth_dev_probing_finish(ethdev);

	return retval;

probe_failed:
	rte_eth_dev_release_port(ethdev);
	return retval;
}

int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
	ethdev_uninit_t ethdev_uninit)
{
	int ret;

	ethdev = rte_eth_dev_allocated(ethdev->data->name);
	if (!ethdev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);

	ret = ethdev_uninit(ethdev);
	if (ret)
		return ret;

	return rte_eth_dev_release_port(ethdev);
}

struct rte_eth_dev *
rte_eth_dev_get_by_name(const char *name)
{
	uint16_t pid;

	if (rte_eth_dev_get_port_by_name(name, &pid))
		return NULL;

	return &rte_eth_devices[pid];
}

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

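/*
 * Informal grammar accepted by the tokeniser below (inferred from its state
 * machine): a comma-separated list of non-empty key=value pairs, where a
 * value may contain bracketed lists whose commas do not split pairs, e.g.
 *
 *	representor=[0-3],other_key=value
 */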
static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fallthrough */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']')
				state = 2;
			else if (*letter == '\0')
				return -EINVAL;
			break;
		}
		letter++;
	}
}

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
	struct rte_kvargs args;
	struct rte_kvargs_pair *pair;
	unsigned int i;
	int result = 0;

	memset(eth_da, 0, sizeof(*eth_da));

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
				RTE_ETHDEV_LOG(ERR,
					"duplicate representor key: %s\n",
					dargs);
				result = -1;
				goto parse_cleanup;
			}
			result = rte_eth_devargs_parse_representor_ports(
					pair->value, eth_da);
			if (result < 0)
				goto parse_cleanup;
		}
	}

parse_cleanup:
	free(args.str);

	return result;
}

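/*
 * DMA memzone names are derived as "eth_p<port>_q<queue>_<ring_name>", so a
 * given (port, queue, ring name) triple always maps to the same memzone in
 * rte_eth_dma_zone_reserve()/rte_eth_dma_zone_free().
 */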
static inline int
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
		const char *ring_name)
{
	return snprintf(name, len, "eth_p%d_q%d_%s",
			port_id, queue_id, ring_name);
}

int
rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
		uint16_t queue_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc = 0;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
		return -ENAMETOOLONG;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz)
		rc = rte_memzone_free(mz);
	else
		rc = -ENOENT;

	return rc;
}

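/*
 * Reuse-or-reserve semantics: if a memzone with the derived name already
 * exists (e.g. after a driver restart without cleanup), it is returned only
 * when it is large enough, sufficiently aligned and on a matching socket;
 * otherwise the call fails instead of reserving a duplicate zone.
 */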
const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned int align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz) {
		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
				size > mz->len ||
				((uintptr_t)mz->addr & (align - 1)) != 0) {
			RTE_ETHDEV_LOG(ERR,
				"memzone %s does not satisfy the requested attributes\n",
				mz->name);
			return NULL;
		}

		return mz;
	}

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}

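/*
 * The hairpin peer helpers below deliberately skip port ID validation
 * (hence the "No need to check the validity again" notes): they are meant
 * to be reached from code paths such as rte_eth_hairpin_bind()/unbind(),
 * which are expected to have validated the port IDs already.
 */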
int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory, but the peer
	 * information must be provided.
	 */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

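/*
 * Illustrative sketch (the static offset variables are hypothetical): a PMD
 * enabling IP reassembly would register the dynfield and dynflag once, e.g.
 *
 *	static int ip_reass_field_offset;
 *	static int ip_reass_flag_offset;
 *
 *	if (rte_eth_ip_reassembly_dynfield_register(&ip_reass_field_offset,
 *						    &ip_reass_flag_offset) < 0)
 *		return -1;
 */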
int
rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset)
{
	static const struct rte_mbuf_dynfield field_desc = {
		.name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME,
		.size = sizeof(rte_eth_ip_reassembly_dynfield_t),
		.align = __alignof__(rte_eth_ip_reassembly_dynfield_t),
	};
	static const struct rte_mbuf_dynflag ip_reassembly_dynflag = {
		.name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME,
	};
	int offset;

	offset = rte_mbuf_dynfield_register(&field_desc);
	if (offset < 0)
		return -1;
	if (field_offset != NULL)
		*field_offset = offset;

	offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag);
	if (offset < 0)
		return -1;
	if (flag_offset != NULL)
		*flag_offset = offset;

	return 0;
}

uint16_t
rte_eth_pkt_burst_dummy(void *queue __rte_unused,
		struct rte_mbuf **pkts __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

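/*
 * Worked example for the mapping below (values are illustrative only):
 * given a range {type = VF, controller = 0, pf = 0, vf = 0, id_base = 5,
 * id_end = 8}, count is 4 and representor port 2 resolves to
 * repr_id = 5 + (2 - 0) = 7.
 */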
int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to the values reported by the port. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_ETHDEV_LOG(WARNING,
				"Port %hu invalid representor ID range %u - %u, entry %u\n",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}

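/*
 * Switch domain IDs are process-local (the state array above is not in
 * shared memory); a representor PMD typically allocates one domain for a
 * backing device and frees it again when the device closes.
 */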
int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	uint16_t i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (eth_dev_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			eth_dev_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (eth_dev_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}