xref: /dpdk/drivers/bus/vdev/vdev.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 RehiveTech. All rights reserved.
 */

#include <string.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_bus.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "rte_bus_vdev.h"
#include "vdev_logs.h"
#include "vdev_private.h"

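/* Key identifying the vdev bus multi-process (IPC) action and its messages. */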
#define VDEV_MP_KEY	"bus_vdev_mp"

/* Forward declare to access virtual bus name */
static struct rte_bus rte_vdev_bus;

/** Double linked list of virtual devices. */
TAILQ_HEAD(vdev_device_list, rte_vdev_device);

static struct vdev_device_list vdev_device_list =
	TAILQ_HEAD_INITIALIZER(vdev_device_list);
/* The lock needs to be recursive because a vdev can manage another vdev. */
static rte_spinlock_recursive_t vdev_device_list_lock =
	RTE_SPINLOCK_RECURSIVE_INITIALIZER;

static struct vdev_driver_list vdev_driver_list =
	TAILQ_HEAD_INITIALIZER(vdev_driver_list);

struct vdev_custom_scan {
	TAILQ_ENTRY(vdev_custom_scan) next;
	rte_vdev_scan_callback callback;
	void *user_arg;
};
TAILQ_HEAD(vdev_custom_scans, vdev_custom_scan);
static struct vdev_custom_scans vdev_custom_scans =
	TAILQ_HEAD_INITIALIZER(vdev_custom_scans);
static rte_spinlock_t vdev_custom_scan_lock = RTE_SPINLOCK_INITIALIZER;

/* register a driver */
void
rte_vdev_register(struct rte_vdev_driver *driver)
{
	TAILQ_INSERT_TAIL(&vdev_driver_list, driver, next);
}

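/*
 * Note: PMDs normally do not call rte_vdev_register() directly; they rely on
 * the RTE_PMD_REGISTER_VDEV() constructor macro from rte_bus_vdev.h. A minimal
 * sketch (the driver name and callbacks below are only illustrative):
 *
 *	static int my_probe(struct rte_vdev_device *dev);
 *	static int my_remove(struct rte_vdev_device *dev);
 *
 *	static struct rte_vdev_driver my_vdev_drv = {
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *	RTE_PMD_REGISTER_VDEV(net_my_vdev, my_vdev_drv);
 */
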
/* unregister a driver */
void
rte_vdev_unregister(struct rte_vdev_driver *driver)
{
	TAILQ_REMOVE(&vdev_driver_list, driver, next);
}

int
rte_vdev_add_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
{
	struct vdev_custom_scan *custom_scan;

	rte_spinlock_lock(&vdev_custom_scan_lock);

	/* check if already registered */
	TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
		if (custom_scan->callback == callback &&
				custom_scan->user_arg == user_arg)
			break;
	}

	if (custom_scan == NULL) {
		custom_scan = malloc(sizeof(struct vdev_custom_scan));
		if (custom_scan != NULL) {
			custom_scan->callback = callback;
			custom_scan->user_arg = user_arg;
			TAILQ_INSERT_TAIL(&vdev_custom_scans, custom_scan, next);
		}
	}

	rte_spinlock_unlock(&vdev_custom_scan_lock);

	return (custom_scan == NULL) ? -1 : 0;
}

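/*
 * Illustrative sketch of a custom scan callback (names are hypothetical):
 * the callback typically injects extra vdev devargs so that the following
 * bus scan picks them up, e.g. via rte_devargs_add().
 *
 *	static void
 *	my_scan_cb(void *user_arg)
 *	{
 *		const char *conf = user_arg;
 *
 *		rte_devargs_add(RTE_DEVTYPE_VIRTUAL, conf);
 *	}
 *
 *	rte_vdev_add_custom_scan(my_scan_cb, (void *)"net_null0,size=64");
 */
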
int
rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
{
	struct vdev_custom_scan *custom_scan, *tmp_scan;

	rte_spinlock_lock(&vdev_custom_scan_lock);
	TAILQ_FOREACH_SAFE(custom_scan, &vdev_custom_scans, next, tmp_scan) {
		if (custom_scan->callback != callback ||
				(custom_scan->user_arg != (void *)-1 &&
				custom_scan->user_arg != user_arg))
			continue;
		TAILQ_REMOVE(&vdev_custom_scans, custom_scan, next);
		free(custom_scan);
	}
	rte_spinlock_unlock(&vdev_custom_scan_lock);

	return 0;
}

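/*
 * Match a device name against the registered drivers, comparing the driver
 * name (or its alias) as a prefix of the device name. On a match, the driver
 * is returned through the addr pointer. Returns 0 on match, non-zero otherwise.
 */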
static int
vdev_parse(const char *name, void *addr)
{
	struct rte_vdev_driver **out = addr;
	struct rte_vdev_driver *driver = NULL;

	TAILQ_FOREACH(driver, &vdev_driver_list, next) {
		if (strncmp(driver->driver.name, name,
			    strlen(driver->driver.name)) == 0)
			break;
		if (driver->driver.alias &&
		    strncmp(driver->driver.alias, name,
			    strlen(driver->driver.alias)) == 0)
			break;
	}
	if (driver != NULL &&
	    addr != NULL)
		*out = driver;
	return driver == NULL;
}

static int
vdev_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_vdev_device *vdev = RTE_DEV_TO_VDEV(dev);
	const struct rte_vdev_driver *driver;

	if (!vdev) {
		rte_errno = EINVAL;
		return -1;
	}

	if (!vdev->device.driver) {
		VDEV_LOG(DEBUG, "no driver attached to device %s", dev->name);
		return 1;
	}

	driver = container_of(vdev->device.driver, const struct rte_vdev_driver,
			driver);

	if (driver->dma_map)
		return driver->dma_map(vdev, addr, iova, len);

	return 0;
}

static int
vdev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_vdev_device *vdev = RTE_DEV_TO_VDEV(dev);
	const struct rte_vdev_driver *driver;

	if (!vdev) {
		rte_errno = EINVAL;
		return -1;
	}

	if (!vdev->device.driver) {
		VDEV_LOG(DEBUG, "no driver attached to device %s", dev->name);
		return 1;
	}

	driver = container_of(vdev->device.driver, const struct rte_vdev_driver,
			driver);

	if (driver->dma_unmap)
		return driver->dma_unmap(vdev, addr, iova, len);

	return 0;
}

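/*
 * Find the driver matching the device name and invoke its probe callback.
 * Fails early if the device is already probed, if no driver matches, or if
 * the driver requires IOVA-as-VA while the EAL runs in PA mode.
 */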
static int
vdev_probe_all_drivers(struct rte_vdev_device *dev)
{
	const char *name;
	struct rte_vdev_driver *driver;
	enum rte_iova_mode iova_mode;
	int ret;

	if (rte_dev_is_probed(&dev->device))
		return -EEXIST;

	name = rte_vdev_device_name(dev);
	VDEV_LOG(DEBUG, "Search driver to probe device %s", name);

	if (vdev_parse(name, &driver))
		return -1;

	iova_mode = rte_eal_iova_mode();
	if ((driver->drv_flags & RTE_VDEV_DRV_NEED_IOVA_AS_VA) && (iova_mode == RTE_IOVA_PA)) {
		VDEV_LOG(ERR, "%s requires VA IOVA mode but current mode is PA, not initializing",
				name);
		return -1;
	}

	ret = driver->probe(dev);
	if (ret == 0)
		dev->device.driver = &driver->driver;
	return ret;
}

/* The caller shall be responsible for thread safety */
static struct rte_vdev_device *
find_vdev(const char *name)
{
	struct rte_vdev_device *dev;

	if (!name)
		return NULL;

	TAILQ_FOREACH(dev, &vdev_device_list, next) {
		const char *devname = rte_vdev_device_name(dev);

		if (!strcmp(devname, name))
			return dev;
	}

	return NULL;
}

static struct rte_devargs *
alloc_devargs(const char *name, const char *args)
{
	struct rte_devargs *devargs;
	int ret;

	devargs = calloc(1, sizeof(*devargs));
	if (!devargs)
		return NULL;

	devargs->bus = &rte_vdev_bus;
	if (args)
		devargs->args = strdup(args);
	else
		devargs->args = strdup("");

	ret = strlcpy(devargs->name, name, sizeof(devargs->name));
	if (ret < 0 || ret >= (int)sizeof(devargs->name)) {
		free(devargs->args);
		free(devargs);
		return NULL;
	}

	return devargs;
}

static int
insert_vdev(const char *name, const char *args,
		struct rte_vdev_device **p_dev,
		bool init)
{
	struct rte_vdev_device *dev;
	struct rte_devargs *devargs;
	int ret;

	if (name == NULL)
		return -EINVAL;

	devargs = alloc_devargs(name, args);
	if (!devargs)
		return -ENOMEM;

	dev = calloc(1, sizeof(*dev));
	if (!dev) {
		ret = -ENOMEM;
		goto fail;
	}

	dev->device.bus = &rte_vdev_bus;
	dev->device.numa_node = SOCKET_ID_ANY;
	dev->device.name = devargs->name;

	if (find_vdev(name)) {
		/*
		 * A vdev is expected to have only one port.
		 * So there is no reason to try probing again,
		 * even with new arguments.
		 */
		ret = -EEXIST;
		goto fail;
	}

	if (init)
		rte_devargs_insert(&devargs);
	dev->device.devargs = devargs;
	TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);

	if (p_dev)
		*p_dev = dev;

	return 0;
fail:
	free(devargs->args);
	free(devargs);
	free(dev);
	return ret;
}

int
rte_vdev_init(const char *name, const char *args)
{
	struct rte_vdev_device *dev;
	int ret;

	rte_spinlock_recursive_lock(&vdev_device_list_lock);
	ret = insert_vdev(name, args, &dev, true);
	if (ret == 0) {
		ret = vdev_probe_all_drivers(dev);
		if (ret) {
			if (ret > 0)
				VDEV_LOG(ERR, "no driver found for %s", name);
			/* If probe fails, remove the device from the vdev list */
			TAILQ_REMOVE(&vdev_device_list, dev, next);
			rte_devargs_remove(dev->device.devargs);
			free(dev);
		}
	}
	rte_spinlock_recursive_unlock(&vdev_device_list_lock);
	return ret;
}

static int
vdev_remove_driver(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	const struct rte_vdev_driver *driver;

	if (!dev->device.driver) {
		VDEV_LOG(DEBUG, "no driver attached to device %s", name);
		return 1;
	}

	driver = container_of(dev->device.driver, const struct rte_vdev_driver,
		driver);
	return driver->remove(dev);
}

int
rte_vdev_uninit(const char *name)
{
	struct rte_vdev_device *dev;
	int ret;

	if (name == NULL)
		return -EINVAL;

	rte_spinlock_recursive_lock(&vdev_device_list_lock);

	dev = find_vdev(name);
	if (!dev) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = vdev_remove_driver(dev);
	if (ret)
		goto unlock;

	TAILQ_REMOVE(&vdev_device_list, dev, next);
	rte_devargs_remove(dev->device.devargs);
	free(dev);

unlock:
	rte_spinlock_recursive_unlock(&vdev_device_list_lock);
	return ret;
}

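/*
 * Hotplug/hot-unplug sketch for rte_vdev_init()/rte_vdev_uninit(), assuming
 * the net_null PMD and its "size"/"copy" arguments are available (the device
 * name and error handler below are only illustrative):
 *
 *	if (rte_vdev_init("net_null0", "size=64,copy=1") != 0)
 *		handle_error();
 *	...
 *	rte_vdev_uninit("net_null0");
 */
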
struct vdev_param {
#define VDEV_SCAN_REQ	1
#define VDEV_SCAN_ONE	2
#define VDEV_SCAN_REP	3
	int type;
	int num;
	char name[RTE_DEV_NAME_MAX_LEN];
};

static int vdev_plug(struct rte_device *dev);

/**
 * This function works as the action for both the primary and secondary
 * processes for static vdev discovery when a secondary process is booting.
 *
 * Step 1: the secondary process sends a sync request to ask for the vdevs
 *         of the primary;
 * Step 2: the primary process receives the request and sends its vdevs one
 *         by one;
 * Step 3: the primary process sends back a reply indicating how many vdevs
 *         were sent.
 */
static int
vdev_action(const struct rte_mp_msg *mp_msg, const void *peer)
{
	struct rte_vdev_device *dev;
	struct rte_mp_msg mp_resp;
	struct vdev_param *ou = (struct vdev_param *)&mp_resp.param;
	const struct vdev_param *in = (const struct vdev_param *)mp_msg->param;
	const char *devname;
	int num;
	int ret;

	strlcpy(mp_resp.name, VDEV_MP_KEY, sizeof(mp_resp.name));
	mp_resp.len_param = sizeof(*ou);
	mp_resp.num_fds = 0;

	switch (in->type) {
	case VDEV_SCAN_REQ:
		ou->type = VDEV_SCAN_ONE;
		ou->num = 1;
		num = 0;

		rte_spinlock_recursive_lock(&vdev_device_list_lock);
		TAILQ_FOREACH(dev, &vdev_device_list, next) {
			devname = rte_vdev_device_name(dev);
			if (strlen(devname) == 0) {
				VDEV_LOG(INFO, "vdev with no name is not sent");
				continue;
			}
			VDEV_LOG(INFO, "send vdev, %s", devname);
			strlcpy(ou->name, devname, RTE_DEV_NAME_MAX_LEN);
			if (rte_mp_sendmsg(&mp_resp) < 0)
				VDEV_LOG(ERR, "send vdev, %s, failed, %s",
					 devname, strerror(rte_errno));
			num++;
		}
		rte_spinlock_recursive_unlock(&vdev_device_list_lock);

		ou->type = VDEV_SCAN_REP;
		ou->num = num;
		if (rte_mp_reply(&mp_resp, peer) < 0)
			VDEV_LOG(ERR, "Failed to reply to a scan request");
		break;
	case VDEV_SCAN_ONE:
		VDEV_LOG(INFO, "receive vdev, %s", in->name);
		ret = insert_vdev(in->name, NULL, NULL, false);
		if (ret == -EEXIST)
			VDEV_LOG(DEBUG, "device already exists, %s", in->name);
		else if (ret < 0)
			VDEV_LOG(ERR, "failed to add vdev, %s", in->name);
		break;
	default:
		VDEV_LOG(ERR, "vdev cannot recognize this message");
	}

	return 0;
}

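/*
 * Bus scan: register the multi-process action, ask the primary process for
 * its vdevs when running as a secondary, run the custom scan callbacks, and
 * finally turn every "vdev" devargs entry into an rte_vdev_device on the
 * device list.
 */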
static int
vdev_scan(void)
{
	struct rte_vdev_device *dev;
	struct rte_devargs *devargs;
	struct vdev_custom_scan *custom_scan;

	if (rte_mp_action_register(VDEV_MP_KEY, vdev_action) < 0 &&
	    rte_errno != EEXIST) {
		/* for primary, unsupported IPC is not an error */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
				rte_errno == ENOTSUP)
			goto scan;
		VDEV_LOG(ERR, "Failed to add vdev mp action");
		return -1;
	}

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct rte_mp_msg mp_req, *mp_rep;
		struct rte_mp_reply mp_reply;
		struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
		struct vdev_param *req = (struct vdev_param *)mp_req.param;
		struct vdev_param *resp;

		strlcpy(mp_req.name, VDEV_MP_KEY, sizeof(mp_req.name));
		mp_req.len_param = sizeof(*req);
		mp_req.num_fds = 0;
		req->type = VDEV_SCAN_REQ;
		if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
		    mp_reply.nb_received == 1) {
			mp_rep = &mp_reply.msgs[0];
			resp = (struct vdev_param *)mp_rep->param;
			VDEV_LOG(INFO, "Received %d vdevs", resp->num);
			free(mp_reply.msgs);
		} else
			VDEV_LOG(ERR, "Failed to request vdev from primary");

		/* Fall through to allow private vdevs in secondary process */
	}

scan:
	/* call custom scan callbacks if any */
	rte_spinlock_lock(&vdev_custom_scan_lock);
	TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
		if (custom_scan->callback != NULL)
			/*
			 * the callback should update devargs list
			 * by calling rte_devargs_insert() with
			 *     devargs.bus = rte_bus_find_by_name("vdev");
			 *     devargs.type = RTE_DEVTYPE_VIRTUAL;
			 *     devargs.policy = RTE_DEV_ALLOWED;
			 */
			custom_scan->callback(custom_scan->user_arg);
	}
	rte_spinlock_unlock(&vdev_custom_scan_lock);

	/* for virtual devices we scan the devargs_list populated via cmdline */
	RTE_EAL_DEVARGS_FOREACH("vdev", devargs) {

		dev = calloc(1, sizeof(*dev));
		if (!dev)
			return -1;

		rte_spinlock_recursive_lock(&vdev_device_list_lock);

		if (find_vdev(devargs->name)) {
			rte_spinlock_recursive_unlock(&vdev_device_list_lock);
			free(dev);
			continue;
		}

		dev->device.bus = &rte_vdev_bus;
		dev->device.devargs = devargs;
		dev->device.numa_node = SOCKET_ID_ANY;
		dev->device.name = devargs->name;

		TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);

		rte_spinlock_recursive_unlock(&vdev_device_list_lock);
	}

	return 0;
}

static int
vdev_probe(void)
{
	struct rte_vdev_device *dev;
	int r, ret = 0;

	/* call the init function for each virtual device */
	TAILQ_FOREACH(dev, &vdev_device_list, next) {
		/* we don't use the vdev lock here, as it's only used in DPDK
		 * initialization; and we don't want to hold such a lock when
		 * we call each driver probe.
		 */

		r = vdev_probe_all_drivers(dev);
		if (r != 0) {
			if (r == -EEXIST)
				continue;
			VDEV_LOG(ERR, "failed to initialize %s device",
				rte_vdev_device_name(dev));
			ret = -1;
		}
	}

	return ret;
}

struct rte_device *
rte_vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
		     const void *data)
{
	const struct rte_vdev_device *vstart;
	struct rte_vdev_device *dev;

	rte_spinlock_recursive_lock(&vdev_device_list_lock);
	if (start != NULL) {
		vstart = RTE_DEV_TO_VDEV_CONST(start);
		dev = TAILQ_NEXT(vstart, next);
	} else {
		dev = TAILQ_FIRST(&vdev_device_list);
	}
	while (dev != NULL) {
		if (cmp(&dev->device, data) == 0)
			break;
		dev = TAILQ_NEXT(dev, next);
	}
	rte_spinlock_recursive_unlock(&vdev_device_list_lock);

	return dev ? &dev->device : NULL;
}

static int
vdev_plug(struct rte_device *dev)
{
	return vdev_probe_all_drivers(RTE_DEV_TO_VDEV(dev));
}

static int
vdev_unplug(struct rte_device *dev)
{
	return rte_vdev_uninit(dev->name);
}

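/*
 * Report the IOVA mode requested by the scanned vdevs: VA if any matching
 * driver needs IOVA-as-VA, otherwise don't care.
 */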
static enum rte_iova_mode
vdev_get_iommu_class(void)
{
	const char *name;
	struct rte_vdev_device *dev;
	struct rte_vdev_driver *driver;

	TAILQ_FOREACH(dev, &vdev_device_list, next) {
		name = rte_vdev_device_name(dev);
		if (vdev_parse(name, &driver))
			continue;

		if (driver->drv_flags & RTE_VDEV_DRV_NEED_IOVA_AS_VA)
			return RTE_IOVA_VA;
	}

	return RTE_IOVA_DC;
}

static struct rte_bus rte_vdev_bus = {
	.scan = vdev_scan,
	.probe = vdev_probe,
	.find_device = rte_vdev_find_device,
	.plug = vdev_plug,
	.unplug = vdev_unplug,
	.parse = vdev_parse,
	.dma_map = vdev_dma_map,
	.dma_unmap = vdev_dma_unmap,
	.get_iommu_class = vdev_get_iommu_class,
	.dev_iterate = rte_vdev_dev_iterate,
};

RTE_REGISTER_BUS(vdev, rte_vdev_bus);
RTE_LOG_REGISTER(vdev_logtype_bus, bus.vdev, NOTICE);