xref: /dpdk/lib/eal/linux/eal_dev.c (revision 8b8036a66e3d59ffa58afb8d96fa2c73262155a7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <string.h>
6 #include <unistd.h>
7 #include <fcntl.h>
8 #include <signal.h>
9 #include <sys/socket.h>
10 #include <linux/netlink.h>
11 
12 #include <rte_string_fns.h>
13 #include <rte_log.h>
14 #include <rte_compat.h>
15 #include <rte_dev.h>
16 #include <rte_malloc.h>
17 #include <rte_interrupts.h>
18 #include <rte_alarm.h>
19 #include <rte_bus.h>
20 #include <rte_eal.h>
21 #include <rte_spinlock.h>
22 #include <rte_errno.h>
23 
24 #include "eal_private.h"
25 
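/*
 * Linux implementation of the EAL device event monitor: a netlink socket
 * subscribed to kernel uevents feeds hot-plug notifications to registered
 * callbacks, and a SIGBUS handler covers accesses to hot-unplugged devices.
 */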
26 static struct rte_intr_handle *intr_handle;
27 static rte_rwlock_t monitor_lock = RTE_RWLOCK_INITIALIZER;
28 static uint32_t monitor_refcount;
29 static bool hotplug_handle;
30 
31 #define EAL_UEV_MSG_LEN 4096
32 #define EAL_UEV_MSG_ELEM_LEN 128
33 
34 /*
35  * Spinlock for device hot-unplug failure handling. Any code that accesses
36  * the bus or a device during failure handling, such as the SIGBUS handler
37  * on the bus or memory-failure handling for a device, must take this lock.
38  * It protects the bus and the device against race conditions.
39  */
40 static rte_spinlock_t failure_handle_lock = RTE_SPINLOCK_INITIALIZER;
41 
42 static struct sigaction sigbus_action_old;
43 
44 static int sigbus_need_recover;
45 
46 static void dev_uev_handler(__rte_unused void *param);
47 
48 /* identify the system layer which reports this event. */
49 enum eal_dev_event_subsystem {
50 	EAL_DEV_EVENT_SUBSYSTEM_PCI, /* PCI bus device event */
51 	EAL_DEV_EVENT_SUBSYSTEM_UIO, /* UIO driver device event */
52 	EAL_DEV_EVENT_SUBSYSTEM_VFIO, /* VFIO driver device event */
53 	EAL_DEV_EVENT_SUBSYSTEM_MAX
54 };
55 
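/* Restore the previously installed SIGBUS action, if we had replaced it. */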
56 static void
57 sigbus_action_recover(void)
58 {
59 	if (sigbus_need_recover) {
60 		sigaction(SIGBUS, &sigbus_action_old, NULL);
61 		sigbus_need_recover = 0;
62 	}
63 }
64 
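/*
 * SIGBUS handler installed while hot-plug handling is enabled: ask the bus
 * layer to handle the faulting address; if no bus claims it, chain to the
 * previously installed SIGBUS action, or abort when there is none.
 */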
65 static void sigbus_handler(int signum, siginfo_t *info,
66 				void *ctx __rte_unused)
67 {
68 	int ret;
69 
70 	RTE_LOG(DEBUG, EAL, "Thread caught SIGBUS, fault address: %p\n",
71 		info->si_addr);
72 
73 	rte_spinlock_lock(&failure_handle_lock);
74 	ret = rte_bus_sigbus_handler(info->si_addr);
75 	rte_spinlock_unlock(&failure_handle_lock);
76 	if (ret == -1) {
77 		rte_exit(EXIT_FAILURE,
78 			 "Failed to handle SIGBUS for hot-unplug, "
79 			 "(rte_errno: %s)!", strerror(rte_errno));
80 	} else if (ret == 1) {
81 		if ((sigbus_action_old.sa_flags & SA_SIGINFO)
82 		    && sigbus_action_old.sa_sigaction) {
83 			(*(sigbus_action_old.sa_sigaction))(signum,
84 							    info, ctx);
85 		} else if (!(sigbus_action_old.sa_flags & SA_SIGINFO)
86 			   && sigbus_action_old.sa_handler) {
87 			(*(sigbus_action_old.sa_handler))(signum);
88 		} else {
89 			rte_exit(EXIT_FAILURE,
90 				 "Failed to handle generic SIGBUS!");
91 		}
92 	}
93 
94 	RTE_LOG(DEBUG, EAL, "Successfully handled SIGBUS for hot-unplug!\n");
95 }
96 
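/* Name comparator passed to bus->find_device(). */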
97 static int cmp_dev_name(const struct rte_device *dev,
98 	const void *_name)
99 {
100 	const char *name = _name;
101 
102 	return strcmp(dev->name, name);
103 }
104 
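/*
 * Create a non-blocking netlink socket subscribed to all kernel uevent
 * multicast groups and store its fd in the monitor interrupt handle.
 */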
105 static int
106 dev_uev_socket_fd_create(void)
107 {
108 	struct sockaddr_nl addr;
109 	int ret, fd;
110 
111 	fd = socket(PF_NETLINK, SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK,
112 		    NETLINK_KOBJECT_UEVENT);
113 	if (fd < 0) {
114 		RTE_LOG(ERR, EAL, "Failed to create uevent fd.\n");
115 		return -1;
116 	}
117 
118 	memset(&addr, 0, sizeof(addr));
119 	addr.nl_family = AF_NETLINK;
120 	addr.nl_pid = 0;
121 	addr.nl_groups = 0xffffffff;
122 
123 	ret = bind(fd, (struct sockaddr *) &addr, sizeof(addr));
124 	if (ret < 0) {
125 		RTE_LOG(ERR, EAL, "Failed to bind uevent socket.\n");
126 		goto err;
127 	}
128 
129 	if (rte_intr_fd_set(intr_handle, fd))
130 		goto err;
131 
132 	return 0;
133 err:
134 	close(fd);
135 	/* Report failure even if only storing the fd in the handle failed. */
136 	return -1;
137 }
138 
139 struct rte_dev_event {
140 	enum rte_dev_event_type type;	/**< device event type */
141 	int subsystem;			/**< subsystem id */
142 	char *devname;			/**< device name */
143 };
144 
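/*
 * Parse a uevent message: a sequence of NUL-separated "KEY=value" strings.
 * Only ACTION, SUBSYSTEM and PCI_SLOT_NAME are used; messages repeated by
 * udev (starting with "libudev") are rejected. On success, the device name
 * is strdup()'d into event->devname and must be freed by the caller.
 */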
145 static int
146 dev_uev_parse(const char *buf, struct rte_dev_event *event, int length)
147 {
148 	char action[EAL_UEV_MSG_ELEM_LEN];
149 	char subsystem[EAL_UEV_MSG_ELEM_LEN];
150 	char pci_slot_name[EAL_UEV_MSG_ELEM_LEN];
151 	int i = 0;
152 
153 	memset(action, 0, EAL_UEV_MSG_ELEM_LEN);
154 	memset(subsystem, 0, EAL_UEV_MSG_ELEM_LEN);
155 	memset(pci_slot_name, 0, EAL_UEV_MSG_ELEM_LEN);
156 
157 	while (i < length) {
158 		for (; i < length; i++) {
159 			if (*buf)
160 				break;
161 			buf++;
162 		}
163 		if (i >= length)
164 			break;
165 
166 		/*
167 		 * Only handle uevents coming from the kernel; messages
168 		 * repeated by udev (prefixed with "libudev") are ignored.
169 		 */
170 		if (!strncmp(buf, "libudev", 7)) {
171 			buf += 7;
172 			i += 7;
173 			return -1;
174 		}
175 		if (!strncmp(buf, "ACTION=", 7)) {
176 			buf += 7;
177 			i += 7;
178 			strlcpy(action, buf, sizeof(action));
179 		} else if (!strncmp(buf, "SUBSYSTEM=", 10)) {
180 			buf += 10;
181 			i += 10;
182 			strlcpy(subsystem, buf, sizeof(subsystem));
183 		} else if (!strncmp(buf, "PCI_SLOT_NAME=", 14)) {
184 			buf += 14;
185 			i += 14;
186 			strlcpy(pci_slot_name, buf, sizeof(pci_slot_name));
187 			event->devname = strdup(pci_slot_name);
188 		}
189 		for (; i < length; i++) {
190 			if (*buf == '\0')
191 				break;
192 			buf++;
193 		}
194 	}
195 
196 	/* parse the subsystem layer */
197 	if (!strncmp(subsystem, "uio", 3))
198 		event->subsystem = EAL_DEV_EVENT_SUBSYSTEM_UIO;
199 	else if (!strncmp(subsystem, "pci", 3))
200 		event->subsystem = EAL_DEV_EVENT_SUBSYSTEM_PCI;
201 	else if (!strncmp(subsystem, "vfio", 4))
202 		event->subsystem = EAL_DEV_EVENT_SUBSYSTEM_VFIO;
203 	else
204 		goto err;
205 
206 	/* parse the action type */
207 	if (!strncmp(action, "add", 3))
208 		event->type = RTE_DEV_EVENT_ADD;
209 	else if (!strncmp(action, "remove", 6))
210 		event->type = RTE_DEV_EVENT_REMOVE;
211 	else
212 		goto err;
213 	return 0;
214 err:
215 	free(event->devname);
216 	return -1;
217 }
218 
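/*
 * Alarm callback used to tear down the uevent monitor when the socket breaks,
 * deferred so the interrupt callback is not unregistered from within its own
 * invocation.
 */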
219 static void
220 dev_delayed_unregister(void *param)
221 {
222 	rte_intr_callback_unregister(intr_handle, dev_uev_handler, param);
223 	close(rte_intr_fd_get(intr_handle));
224 	rte_intr_fd_set(intr_handle, -1);
225 }
226 
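/*
 * Interrupt callback for the uevent socket: read one message, parse it,
 * perform hot-unplug handling for remove events when enabled, then notify
 * the callbacks registered for the device.
 */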
227 static void
228 dev_uev_handler(__rte_unused void *param)
229 {
230 	struct rte_dev_event uevent;
231 	int ret;
232 	char buf[EAL_UEV_MSG_LEN];
233 	struct rte_bus *bus;
234 	struct rte_device *dev;
235 	const char *busname = "";
236 
237 	memset(&uevent, 0, sizeof(struct rte_dev_event));
238 	memset(buf, 0, EAL_UEV_MSG_LEN);
239 
240 	ret = recv(rte_intr_fd_get(intr_handle), buf, EAL_UEV_MSG_LEN,
241 		   MSG_DONTWAIT);
242 	if (ret < 0 && errno == EAGAIN)
243 		return;
244 	else if (ret <= 0) {
245 		/* connection is closed or broken and cannot be recovered. */
246 		RTE_LOG(ERR, EAL, "uevent socket connection is broken.\n");
247 		rte_eal_alarm_set(1, dev_delayed_unregister, NULL);
248 		return;
249 	}
250 
251 	ret = dev_uev_parse(buf, &uevent, EAL_UEV_MSG_LEN);
252 	if (ret < 0) {
253 		RTE_LOG(DEBUG, EAL, "Ignoring uevent '%s'\n", buf);
254 		return;
255 	}
256 
257 	RTE_LOG(DEBUG, EAL, "received uevent (name: %s, type: %d, subsystem: %d)\n",
258 		uevent.devname, uevent.type, uevent.subsystem);
259 
260 	switch (uevent.subsystem) {
261 	case EAL_DEV_EVENT_SUBSYSTEM_PCI:
262 	case EAL_DEV_EVENT_SUBSYSTEM_UIO:
263 		busname = "pci";
264 		break;
265 	default:
266 		break;
267 	}
268 
269 	if (uevent.devname) {
270 		if (uevent.type == RTE_DEV_EVENT_REMOVE && hotplug_handle) {
271 			rte_spinlock_lock(&failure_handle_lock);
272 			bus = rte_bus_find_by_name(busname);
273 			if (bus == NULL) {
274 				RTE_LOG(ERR, EAL, "Cannot find bus (%s)\n",
275 					busname);
276 				goto failure_handle_err;
277 			}
278 
279 			dev = bus->find_device(NULL, cmp_dev_name,
280 					       uevent.devname);
281 			if (dev == NULL) {
282 				RTE_LOG(ERR, EAL, "Cannot find device (%s) on "
283 					"bus (%s)\n", uevent.devname, busname);
284 				goto failure_handle_err;
285 			}
286 
287 			ret = bus->hot_unplug_handler(dev);
288 			if (ret) {
289 				RTE_LOG(ERR, EAL, "Cannot handle hot-unplug "
290 					"for device (%s)\n", dev->name);
291 			}
292 			rte_spinlock_unlock(&failure_handle_lock);
293 		}
294 		rte_dev_event_callback_process(uevent.devname, uevent.type);
295 		free(uevent.devname);
296 	}
297 
298 	return;
299 
300 failure_handle_err:
301 	rte_spinlock_unlock(&failure_handle_lock);
302 	free(uevent.devname);
303 }
304 
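/*
 * Reference-counted monitor start: the first caller allocates the interrupt
 * handle, opens the uevent socket and registers dev_uev_handler(); subsequent
 * callers only increment the reference count.
 */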
305 int
306 rte_dev_event_monitor_start(void)
307 {
308 	int ret = 0;
309 
310 	rte_rwlock_write_lock(&monitor_lock);
311 
312 	if (monitor_refcount) {
313 		monitor_refcount++;
314 		goto exit;
315 	}
316 
317 	intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
318 	if (intr_handle == NULL) {
319 		RTE_LOG(ERR, EAL, "Failed to allocate intr_handle\n");
		ret = -ENOMEM;
320 		goto exit;
321 	}
322 
323 	ret = rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_DEV_EVENT);
324 	if (ret)
325 		goto exit;
326 
327 	ret = rte_intr_fd_set(intr_handle, -1);
328 	if (ret)
329 		goto exit;
330 
331 	ret = dev_uev_socket_fd_create();
332 	if (ret) {
333 		RTE_LOG(ERR, EAL, "Failed to create device event fd.\n");
334 		goto exit;
335 	}
336 
337 	ret = rte_intr_callback_register(intr_handle, dev_uev_handler, NULL);
338 
339 	if (ret) {
340 		close(rte_intr_fd_get(intr_handle));
341 		goto exit;
342 	}
343 
344 	monitor_refcount++;
345 
346 exit:
347 	if (ret) {
348 		rte_intr_instance_free(intr_handle);
349 		intr_handle = NULL;
350 	}
351 	rte_rwlock_write_unlock(&monitor_lock);
352 	return ret;
353 }
354 
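/*
 * Reference-counted monitor stop: only the last caller unregisters the uevent
 * callback, closes the socket and frees the interrupt handle.
 */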
355 int
356 rte_dev_event_monitor_stop(void)
357 {
358 	int ret = 0;
359 
360 	rte_rwlock_write_lock(&monitor_lock);
361 
362 	if (!monitor_refcount) {
363 		RTE_LOG(ERR, EAL, "device event monitor already stopped\n");
364 		goto exit;
365 	}
366 
367 	if (monitor_refcount > 1) {
368 		monitor_refcount--;
369 		goto exit;
370 	}
371 
372 	ret = rte_intr_callback_unregister(intr_handle, dev_uev_handler,
373 					   (void *)-1);
374 	if (ret < 0) {
375 		RTE_LOG(ERR, EAL, "Failed to unregister uevent callback.\n");
376 		goto exit;
377 	}
378 
379 	close(rte_intr_fd_get(intr_handle));
380 	rte_intr_instance_free(intr_handle);
381 	intr_handle = NULL;
382 
383 	monitor_refcount--;
384 
385 exit:
386 	rte_rwlock_write_unlock(&monitor_lock);
387 
388 	return ret;
389 }
390 
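/*
 * Install sigbus_handler() for SIGBUS, saving the previous action so that it
 * can be restored by dev_sigbus_handler_unregister().
 */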
391 int
392 dev_sigbus_handler_register(void)
393 {
394 	sigset_t mask;
395 	struct sigaction action;
396 
397 	rte_errno = 0;
398 
399 	if (sigbus_need_recover)
400 		return 0;
401 
402 	sigemptyset(&mask);
403 	sigaddset(&mask, SIGBUS);
404 	action.sa_flags = SA_SIGINFO;
405 	action.sa_mask = mask;
406 	action.sa_sigaction = sigbus_handler;
407 	sigbus_need_recover = !sigaction(SIGBUS, &action, &sigbus_action_old);
408 
409 	return rte_errno;
410 }
411 
412 int
413 dev_sigbus_handler_unregister(void)
414 {
415 	rte_errno = 0;
416 
417 	sigbus_action_recover();
418 
419 	return rte_errno;
420 }
421 
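/*
 * Enable hot-unplug handling: register the SIGBUS handler and allow
 * dev_uev_handler() to run the bus hot_unplug_handler on remove events.
 */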
422 int
423 rte_dev_hotplug_handle_enable(void)
424 {
425 	int ret = 0;
426 
427 	ret = dev_sigbus_handler_register();
428 	if (ret < 0)
429 		RTE_LOG(ERR, EAL,
430 			"Failed to register sigbus handler for devices.\n");
431 
432 	hotplug_handle = true;
433 
434 	return ret;
435 }
436 
437 int
438 rte_dev_hotplug_handle_disable(void)
439 {
440 	int ret = 0;
441 
442 	ret = dev_sigbus_handler_unregister();
443 	if (ret < 0)
444 		RTE_LOG(ERR, EAL,
445 			"Failed to unregister sigbus handler for devices.\n");
446 
447 	hotplug_handle = false;
448 
449 	return ret;
450 }
451