xref: /dpdk/lib/ethdev/ethdev_driver.c (revision 5d52418fa4b9a7f28eaedc1d88ec5cf330381c0e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Intel Corporation
 */

#include <stdlib.h>
#include <pthread.h>

#include <rte_kvargs.h>
#include <rte_malloc.h>

#include "ethdev_driver.h"
#include "ethdev_private.h"

/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
	uint16_t i;

	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].data != NULL &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint16_t
eth_dev_find_free_port(void)
{
	uint16_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* Using shared name field to find a free port. */
		if (eth_dev_shared_data->data[i].name[0] == '\0') {
			RTE_ASSERT(rte_eth_devices[i].state ==
				   RTE_ETH_DEV_UNUSED);
			return i;
		}
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &eth_dev_shared_data->data[port_id];

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint16_t port_id;
	struct rte_eth_dev *eth_dev = NULL;
	size_t name_len;

	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
		return NULL;
	}

	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
		return NULL;
	}

	eth_dev_shared_data_prepare();

	/* Synchronize port creation between primary and secondary processes. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_dev_allocated(name) != NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Ethernet device with name %s already allocated\n",
			name);
		goto unlock;
	}

	port_id = eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Reached maximum number of Ethernet ports\n");
		goto unlock;
	}

	eth_dev = eth_dev_get(port_id);
	strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
	eth_dev->data->port_id = port_id;
	eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
	eth_dev->data->mtu = RTE_ETHER_MTU;
	pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return eth_dev;
}
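
/*
 * Usage sketch (not part of this file): a typical primary-process probe
 * path allocates a port by name, wires up its ops and finishes probing
 * once the device is usable. my_vdev_probe and my_dev_ops are
 * hypothetical names.
 *
 *	static int
 *	my_vdev_probe(struct rte_vdev_device *vdev)
 *	{
 *		struct rte_eth_dev *eth_dev;
 *
 *		eth_dev = rte_eth_dev_allocate(rte_vdev_device_name(vdev));
 *		if (eth_dev == NULL)
 *			return -ENOMEM;
 *
 *		eth_dev->dev_ops = &my_dev_ops;
 *		rte_eth_dev_probing_finish(eth_dev);
 *		return 0;
 *	}
 */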

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	struct rte_eth_dev *ethdev;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ethdev = eth_dev_allocated(name);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ethdev;
}

/*
 * Attach to a port already registered by the primary process, which
 * ensures that the same device gets the same port ID in both the
 * primary and the secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint16_t i;
	struct rte_eth_dev *eth_dev = NULL;

	eth_dev_shared_data_prepare();

	/* Synchronize port attachment to primary port creation and release. */
	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_ETHDEV_LOG(ERR,
			"Device %s is not driven by the primary process\n",
			name);
	} else {
		eth_dev = eth_dev_get(i);
		RTE_ASSERT(eth_dev->data->port_id == i);
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return eth_dev;
}

int
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *ret_param)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;
	int rc = 0;

	rte_spinlock_lock(&eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&eth_dev_cb_lock);
		rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&eth_dev_cb_lock);
	return rc;
}
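
/*
 * Usage sketch (not part of this file): the callbacks invoked above are
 * the ones applications register through the public API. lsc_event_cb is
 * a hypothetical name.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(event);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("link status changed on port %u\n", port_id);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *			lsc_event_cb, NULL);
 */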

void
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	/*
	 * For a secondary process, the device is expected to be already
	 * 'usable' at this point: shared data and all function pointers
	 * for fast-path devops must have been set up properly inside
	 * rte_eth_dev.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);

	dev->state = RTE_ETH_DEV_ATTACHED;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev_shared_data_prepare();

	if (eth_dev->state != RTE_ETH_DEV_UNUSED)
		rte_eth_dev_callback_process(eth_dev,
				RTE_ETH_EVENT_DESTROY, NULL);

	eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	eth_dev->device = NULL;
	eth_dev->process_private = NULL;
	eth_dev->intr_handle = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;
	eth_dev->rx_queue_count = NULL;
	eth_dev->rx_descriptor_status = NULL;
	eth_dev->tx_descriptor_status = NULL;
	eth_dev->dev_ops = NULL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eth_dev->data->rx_queues);
		rte_free(eth_dev->data->tx_queues);
		rte_free(eth_dev->data->mac_addrs);
		rte_free(eth_dev->data->hash_mac_addrs);
		rte_free(eth_dev->data->dev_private);
		pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
		memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

int
rte_eth_dev_create(struct rte_device *device, const char *name,
	size_t priv_data_size,
	ethdev_bus_specific_init ethdev_bus_specific_init,
	void *bus_init_params,
	ethdev_init_t ethdev_init, void *init_params)
{
	struct rte_eth_dev *ethdev;
	int retval;

	if (*ethdev_init == NULL)
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ethdev = rte_eth_dev_allocate(name);
		if (!ethdev)
			return -ENODEV;

		if (priv_data_size) {
			ethdev->data->dev_private = rte_zmalloc_socket(
				name, priv_data_size, RTE_CACHE_LINE_SIZE,
				device->numa_node);

			if (!ethdev->data->dev_private) {
				RTE_ETHDEV_LOG(ERR,
					"failed to allocate private data\n");
				retval = -ENOMEM;
				goto probe_failed;
			}
		}
	} else {
		ethdev = rte_eth_dev_attach_secondary(name);
		if (!ethdev) {
			RTE_ETHDEV_LOG(ERR,
				"secondary process attach failed, ethdev doesn't exist\n");
			return -ENODEV;
		}
	}

	ethdev->device = device;

	if (ethdev_bus_specific_init) {
		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
		if (retval) {
			RTE_ETHDEV_LOG(ERR,
				"ethdev bus specific initialisation failed\n");
			goto probe_failed;
		}
	}

	retval = ethdev_init(ethdev, init_params);
	if (retval) {
		RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
		goto probe_failed;
	}

	rte_eth_dev_probing_finish(ethdev);

	return retval;

probe_failed:
	rte_eth_dev_release_port(ethdev);
	return retval;
}
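
/*
 * Usage sketch (not part of this file): a PCI PMD probe function built on
 * rte_eth_dev_create(); the init callback runs in both primary and
 * secondary processes. my_priv, my_dev_ops, my_ethdev_init and
 * my_pci_probe are hypothetical names.
 *
 *	static int
 *	my_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
 *	{
 *		struct my_priv *priv = ethdev->data->dev_private;
 *
 *		RTE_SET_USED(init_params);
 *		RTE_SET_USED(priv);
 *		ethdev->dev_ops = &my_dev_ops;
 *		return 0;
 *	}
 *
 *	static int
 *	my_pci_probe(struct rte_pci_driver *drv, struct rte_pci_device *pci_dev)
 *	{
 *		RTE_SET_USED(drv);
 *		return rte_eth_dev_create(&pci_dev->device,
 *				pci_dev->device.name, sizeof(struct my_priv),
 *				NULL, NULL, my_ethdev_init, NULL);
 *	}
 */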

int
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
	ethdev_uninit_t ethdev_uninit)
{
	int ret;

	ethdev = rte_eth_dev_allocated(ethdev->data->name);
	if (!ethdev)
		return -ENODEV;

	if (*ethdev_uninit == NULL)
		return -EINVAL;

	ret = ethdev_uninit(ethdev);
	if (ret)
		return ret;

	return rte_eth_dev_release_port(ethdev);
}

struct rte_eth_dev *
rte_eth_dev_get_by_name(const char *name)
{
	uint16_t pid;

	if (rte_eth_dev_get_port_by_name(name, &pid))
		return NULL;

	return &rte_eth_devices[pid];
}

int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fallthrough */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']')
				state = 2;
			else if (*letter == '\0')
				return -EINVAL;
			break;
		}
		letter++;
	}
}
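
/*
 * Worked example for the state machine above (illustrative): the input
 *
 *	"representor=[0,2-4],driver=foo"
 *
 * tokenises into two pairs, {key = "representor", value = "[0,2-4]"} and
 * {key = "driver", value = "foo"}. The bracket state (case 3) keeps the
 * comma inside "[0,2-4]" from being treated as a pair separator.
 */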

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
	struct rte_kvargs args;
	struct rte_kvargs_pair *pair;
	unsigned int i;
	int result = 0;

	memset(eth_da, 0, sizeof(*eth_da));

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
				RTE_LOG(ERR, EAL, "duplicate representor key: %s\n",
					dargs);
				result = -1;
				goto parse_cleanup;
			}
			result = rte_eth_devargs_parse_representor_ports(
					pair->value, eth_da);
			if (result < 0)
				goto parse_cleanup;
		}
	}

parse_cleanup:
	free(args.str);

	return result;
}
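
/*
 * Usage sketch (not part of this file), assuming the usual representor
 * devargs syntax:
 *
 *	struct rte_eth_devargs da;
 *
 *	if (rte_eth_devargs_parse("representor=vf[0-3]", &da) < 0)
 *		return -EINVAL;
 *
 * On success, da.type is expected to be RTE_ETH_REPRESENTOR_VF with
 * da.nb_representor_ports == 4 (representor ports 0..3).
 */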

static inline int
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
		const char *ring_name)
{
	return snprintf(name, len, "eth_p%d_q%d_%s",
			port_id, queue_id, ring_name);
}

int
rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
		uint16_t queue_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc = 0;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
		return -ENAMETOOLONG;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz)
		rc = rte_memzone_free(mz);
	else
		rc = -ENOENT;

	return rc;
}

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned int align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	int rc;

	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
			queue_id, ring_name);
	if (rc >= RTE_MEMZONE_NAMESIZE) {
		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	mz = rte_memzone_lookup(z_name);
	if (mz) {
		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
				size > mz->len ||
				((uintptr_t)mz->addr & (align - 1)) != 0) {
			RTE_ETHDEV_LOG(ERR,
				"memzone %s does not satisfy the requested attributes\n",
				mz->name);
			return NULL;
		}

		return mz;
	}

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}
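
/*
 * Usage sketch (not part of this file): a PMD reserving DMA-able memory
 * for a descriptor ring in its rx_queue_setup path. ring_size, socket_id
 * and rxq are hypothetical locals.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
 *			ring_size, RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 *	rxq->ring = mz->addr;
 *	rxq->ring_iova = mz->iova;
 */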

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_bind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	if (*dev->dev_ops->hairpin_queue_peer_unbind == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory, but peer info is. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	if (*dev->dev_ops->hairpin_queue_peer_update == NULL)
		return -ENOTSUP;

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_ip_reassembly_dynfield_register(int *field_offset, int *flag_offset)
{
	static const struct rte_mbuf_dynfield field_desc = {
		.name = RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME,
		.size = sizeof(rte_eth_ip_reassembly_dynfield_t),
		.align = __alignof__(rte_eth_ip_reassembly_dynfield_t),
	};
	static const struct rte_mbuf_dynflag ip_reassembly_dynflag = {
		.name = RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME,
	};
	int offset;

	offset = rte_mbuf_dynfield_register(&field_desc);
	if (offset < 0)
		return -1;
	if (field_offset != NULL)
		*field_offset = offset;

	offset = rte_mbuf_dynflag_register(&ip_reassembly_dynflag);
	if (offset < 0)
		return -1;
	if (flag_offset != NULL)
		*flag_offset = offset;

	return 0;
}
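
/*
 * Usage sketch (not part of this file, assuming a PMD with IP reassembly
 * offload): register the dynfield and dynflag once, then use the offsets
 * per mbuf when reporting an incomplete reassembly. field_off, flag_off
 * and mbuf are hypothetical.
 *
 *	int field_off, flag_off;
 *
 *	if (rte_eth_ip_reassembly_dynfield_register(&field_off,
 *			&flag_off) < 0)
 *		return -1;
 *
 *	rte_eth_ip_reassembly_dynfield_t *frag_data =
 *		RTE_MBUF_DYNFIELD(mbuf, field_off,
 *				  rte_eth_ip_reassembly_dynfield_t *);
 *	mbuf->ol_flags |= RTE_BIT64(flag_off);
 */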

uint16_t
rte_eth_pkt_burst_dummy(void *queue __rte_unused,
		struct rte_mbuf **pkts __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}
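
/*
 * Usage sketch (not part of this file): drivers typically point the burst
 * handlers at the dummy while stopping or closing a port, so concurrent
 * fast-path calls return 0 packets instead of touching freed queues:
 *
 *	dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
 *	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
 */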

int
rte_eth_representor_id_get(uint16_t port_id,
			   enum rte_eth_representor_type type,
			   int controller, int pf, int representor_port,
			   uint16_t *repr_id)
{
	int ret, n, count;
	uint32_t i;
	struct rte_eth_representor_info *info = NULL;
	size_t size;

	if (type == RTE_ETH_REPRESENTOR_NONE)
		return 0;
	if (repr_id == NULL)
		return -EINVAL;

	/* Get PMD representor range info. */
	ret = rte_eth_representor_info_get(port_id, NULL);
	if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
	    controller == -1 && pf == -1) {
		/* Direct mapping for legacy VF representor. */
		*repr_id = representor_port;
		return 0;
	} else if (ret < 0) {
		return ret;
	}
	n = ret;
	size = sizeof(*info) + n * sizeof(info->ranges[0]);
	info = calloc(1, size);
	if (info == NULL)
		return -ENOMEM;
	info->nb_ranges_alloc = n;
	ret = rte_eth_representor_info_get(port_id, info);
	if (ret < 0)
		goto out;

	/* Default controller and pf to caller. */
	if (controller == -1)
		controller = info->controller;
	if (pf == -1)
		pf = info->pf;

	/* Locate representor ID. */
	ret = -ENOENT;
	for (i = 0; i < info->nb_ranges; ++i) {
		if (info->ranges[i].type != type)
			continue;
		if (info->ranges[i].controller != controller)
			continue;
		if (info->ranges[i].id_end < info->ranges[i].id_base) {
			RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID range %u - %u, entry %d\n",
				port_id, info->ranges[i].id_base,
				info->ranges[i].id_end, i);
			continue;
		}
		count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
		switch (info->ranges[i].type) {
		case RTE_ETH_REPRESENTOR_PF:
			if (pf < info->ranges[i].pf ||
			    pf >= info->ranges[i].pf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (pf - info->ranges[i].pf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_VF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].vf ||
			    representor_port >= info->ranges[i].vf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].vf);
			ret = 0;
			goto out;
		case RTE_ETH_REPRESENTOR_SF:
			if (info->ranges[i].pf != pf)
				continue;
			if (representor_port < info->ranges[i].sf ||
			    representor_port >= info->ranges[i].sf + count)
				continue;
			*repr_id = info->ranges[i].id_base +
				   (representor_port - info->ranges[i].sf);
			ret = 0;
			goto out;
		default:
			break;
		}
	}
out:
	free(info);
	return ret;
}
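
/*
 * Usage sketch (not part of this file): resolving the representor ID of
 * VF 2 on the backing device, with controller and PF both passed as -1
 * so they default to the values in the device's representor info.
 * backer_port_id and use_representor_id() are hypothetical.
 *
 *	uint16_t repr_id;
 *
 *	if (rte_eth_representor_id_get(backer_port_id,
 *			RTE_ETH_REPRESENTOR_VF, -1, -1, 2, &repr_id) == 0)
 *		use_representor_id(repr_id);
 */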

int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	uint16_t i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (eth_dev_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			eth_dev_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (eth_dev_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}
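
/*
 * Usage sketch (not part of this file): a PMD allocates one switch domain
 * at probe time for a group of related ports (e.g. a PF and its
 * representors), publishes it through the device's switch_info, and frees
 * it again when the last port of the group is released. my_priv is a
 * hypothetical driver-private structure.
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	priv->switch_domain_id = domain_id;
 *
 * and on removal of the last port:
 *
 *	rte_eth_switch_domain_free(priv->switch_domain_id);
 */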
775