/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#include <rte_string_fns.h>
#include <rte_log.h>
#include <rte_compat.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_bus.h>
#include <rte_eal.h>
#include <rte_spinlock.h>
#include <rte_errno.h>

#include "eal_private.h"

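/*
 * State of the uevent-based device event monitor: the netlink interrupt
 * handle, a reference count (protected by monitor_lock) so that start/stop
 * calls can nest, and a flag telling whether hot-unplug failures should be
 * handled automatically.
 */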
static struct rte_intr_handle *intr_handle;
static rte_rwlock_t monitor_lock = RTE_RWLOCK_INITIALIZER;
static uint32_t monitor_refcount;
static bool hotplug_handle;

#define EAL_UEV_MSG_LEN 4096
#define EAL_UEV_MSG_ELEM_LEN 128

/*
 * Spinlock for device hot-unplug failure handling. Any code that accesses the
 * bus or the device during failure handling (such as handling SIGBUS on the
 * bus or a memory failure on the device) must take this lock to protect the
 * bus and the device from race conditions.
 */
static rte_spinlock_t failure_handle_lock = RTE_SPINLOCK_INITIALIZER;

static struct sigaction sigbus_action_old;

static int sigbus_need_recover;

static void dev_uev_handler(__rte_unused void *param);

/* identify the system layer which reports this event. */
enum eal_dev_event_subsystem {
	EAL_DEV_EVENT_SUBSYSTEM_PCI, /* PCI bus device event */
	EAL_DEV_EVENT_SUBSYSTEM_UIO, /* UIO driver device event */
	EAL_DEV_EVENT_SUBSYSTEM_VFIO, /* VFIO driver device event */
	EAL_DEV_EVENT_SUBSYSTEM_MAX
};

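/* Restore the SIGBUS action that was in place before
 * dev_sigbus_handler_register() installed its own handler.
 */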
static void
sigbus_action_recover(void)
{
	if (sigbus_need_recover) {
		sigaction(SIGBUS, &sigbus_action_old, NULL);
		sigbus_need_recover = 0;
	}
}

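/*
 * SIGBUS handler installed for hot-unplug: let the buses try to handle the
 * faulting address first, and fall back to the previously installed SIGBUS
 * action (or abort) when the fault is unrelated to a removed device.
 */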
static void sigbus_handler(int signum, siginfo_t *info,
				void *ctx __rte_unused)
{
	int ret;

	RTE_LOG(DEBUG, EAL, "Thread caught SIGBUS, fault address: %p\n",
		info->si_addr);

	rte_spinlock_lock(&failure_handle_lock);
	ret = rte_bus_sigbus_handler(info->si_addr);
	rte_spinlock_unlock(&failure_handle_lock);
	if (ret == -1) {
		rte_exit(EXIT_FAILURE,
			 "Failed to handle SIGBUS for hot-unplug "
			 "(rte_errno: %s)!", strerror(rte_errno));
	} else if (ret == 1) {
		/* The fault does not belong to any bus device: chain to the
		 * SIGBUS action that was installed before ours.
		 */
		if ((sigbus_action_old.sa_flags & SA_SIGINFO)
		    && sigbus_action_old.sa_sigaction) {
			(*(sigbus_action_old.sa_sigaction))(signum,
							    info, ctx);
		} else if (!(sigbus_action_old.sa_flags & SA_SIGINFO)
			   && sigbus_action_old.sa_handler) {
			(*(sigbus_action_old.sa_handler))(signum);
		} else {
			rte_exit(EXIT_FAILURE,
				 "Failed to handle generic SIGBUS!");
		}
	}

	RTE_LOG(DEBUG, EAL, "Successfully handled SIGBUS for hot-unplug!\n");
}

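/* Match callback used with bus->find_device(): compare by device name. */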
static int cmp_dev_name(const struct rte_device *dev,
	const void *_name)
{
	const char *name = _name;

	return strcmp(dev->name, name);
}

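/*
 * Create a non-blocking netlink socket subscribed to kernel uevents
 * (NETLINK_KOBJECT_UEVENT) and store its fd in the interrupt handle.
 */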
static int
dev_uev_socket_fd_create(void)
{
	struct sockaddr_nl addr;
	int ret, fd;

	fd = socket(PF_NETLINK, SOCK_RAW | SOCK_CLOEXEC | SOCK_NONBLOCK,
		    NETLINK_KOBJECT_UEVENT);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Failed to create uevent fd.\n");
		return -1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	addr.nl_pid = 0;
	addr.nl_groups = 0xffffffff;

	ret = bind(fd, (struct sockaddr *) &addr, sizeof(addr));
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Failed to bind uevent socket.\n");
		goto err;
	}

	if (rte_intr_fd_set(intr_handle, fd))
		goto err;

	return 0;
err:
	/* Report an error even when bind() succeeded but storing the fd in
	 * the interrupt handle failed.
	 */
	close(fd);
	return -1;
}

struct rte_dev_event {
	enum rte_dev_event_type type;	/**< device event type */
	int subsystem;			/**< subsystem id */
	char *devname;			/**< device name */
};

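/*
 * Parse a uevent message: walk the NUL-separated "KEY=value" strings in the
 * buffer and extract the action, the subsystem and the PCI slot name, which
 * is duplicated into event->devname.
 */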
static int
dev_uev_parse(const char *buf, struct rte_dev_event *event, int length)
{
	char action[EAL_UEV_MSG_ELEM_LEN];
	char subsystem[EAL_UEV_MSG_ELEM_LEN];
	char pci_slot_name[EAL_UEV_MSG_ELEM_LEN];
	int i = 0;

	memset(action, 0, EAL_UEV_MSG_ELEM_LEN);
	memset(subsystem, 0, EAL_UEV_MSG_ELEM_LEN);
	memset(pci_slot_name, 0, EAL_UEV_MSG_ELEM_LEN);

	while (i < length) {
		for (; i < length; i++) {
			if (*buf)
				break;
			buf++;
		}
		/*
		 * Only parse uevents coming from the kernel side; messages
		 * generated by udev/libudev are ignored.
		 */
		if (!strncmp(buf, "libudev", 7)) {
			buf += 7;
			i += 7;
			return -1;
		}
		if (!strncmp(buf, "ACTION=", 7)) {
			buf += 7;
			i += 7;
			strlcpy(action, buf, sizeof(action));
		} else if (!strncmp(buf, "SUBSYSTEM=", 10)) {
			buf += 10;
			i += 10;
			strlcpy(subsystem, buf, sizeof(subsystem));
		} else if (!strncmp(buf, "PCI_SLOT_NAME=", 14)) {
			buf += 14;
			i += 14;
			strlcpy(pci_slot_name, buf, sizeof(pci_slot_name));
			event->devname = strdup(pci_slot_name);
		}
		for (; i < length; i++) {
			if (*buf == '\0')
				break;
			buf++;
		}
	}

	/* parse the subsystem layer */
	if (!strncmp(subsystem, "uio", 3))
		event->subsystem = EAL_DEV_EVENT_SUBSYSTEM_UIO;
	else if (!strncmp(subsystem, "pci", 3))
		event->subsystem = EAL_DEV_EVENT_SUBSYSTEM_PCI;
	else if (!strncmp(subsystem, "vfio", 4))
		event->subsystem = EAL_DEV_EVENT_SUBSYSTEM_VFIO;
	else
		goto err;

	/* parse the action type */
	if (!strncmp(action, "add", 3))
		event->type = RTE_DEV_EVENT_ADD;
	else if (!strncmp(action, "remove", 6))
		event->type = RTE_DEV_EVENT_REMOVE;
	else
		goto err;
	return 0;
err:
	free(event->devname);
	return -1;
}

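/* Alarm callback used to tear down the uevent monitoring socket outside of
 * the interrupt callback context: unregister the handler and invalidate the
 * fd stored in the interrupt handle.
 */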
static void
dev_delayed_unregister(void *param)
{
	rte_intr_callback_unregister(intr_handle, dev_uev_handler, param);
	close(rte_intr_fd_get(intr_handle));
	rte_intr_fd_set(intr_handle, -1);
}

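/*
 * Interrupt callback for the uevent socket: read one message, parse it and,
 * for a removal event with hot-plug handling enabled, ask the bus to handle
 * the hot-unplug before notifying the registered device event callbacks.
 */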
static void
dev_uev_handler(__rte_unused void *param)
{
	struct rte_dev_event uevent;
	int ret;
	char buf[EAL_UEV_MSG_LEN];
	struct rte_bus *bus;
	struct rte_device *dev;
	const char *busname = "";

	memset(&uevent, 0, sizeof(struct rte_dev_event));
	memset(buf, 0, EAL_UEV_MSG_LEN);

	/* Leave the last byte zeroed so the buffer is always NUL-terminated
	 * for the string parsing below.
	 */
	ret = recv(rte_intr_fd_get(intr_handle), buf, EAL_UEV_MSG_LEN - 1,
		   MSG_DONTWAIT);
	if (ret < 0 && errno == EAGAIN)
		return;
	else if (ret <= 0) {
		/* The connection is closed or broken and cannot be recovered. */
		RTE_LOG(ERR, EAL, "uevent socket connection is broken.\n");
		rte_eal_alarm_set(1, dev_delayed_unregister, NULL);
		return;
	}

	ret = dev_uev_parse(buf, &uevent, EAL_UEV_MSG_LEN);
	if (ret < 0) {
		RTE_LOG(DEBUG, EAL, "Ignoring uevent '%s'\n", buf);
		return;
	}

	RTE_LOG(DEBUG, EAL, "receive uevent(name:%s, type:%d, subsystem:%d)\n",
		uevent.devname, uevent.type, uevent.subsystem);

	switch (uevent.subsystem) {
	case EAL_DEV_EVENT_SUBSYSTEM_PCI:
	case EAL_DEV_EVENT_SUBSYSTEM_UIO:
		busname = "pci";
		break;
	default:
		break;
	}

	if (uevent.devname) {
		if (uevent.type == RTE_DEV_EVENT_REMOVE && hotplug_handle) {
			rte_spinlock_lock(&failure_handle_lock);
			bus = rte_bus_find_by_name(busname);
			if (bus == NULL) {
				RTE_LOG(ERR, EAL, "Cannot find bus (%s)\n",
					busname);
				goto failure_handle_err;
			}

			dev = bus->find_device(NULL, cmp_dev_name,
					       uevent.devname);
			if (dev == NULL) {
				RTE_LOG(ERR, EAL, "Cannot find device (%s) on "
					"bus (%s)\n", uevent.devname, busname);
				goto failure_handle_err;
			}

			ret = bus->hot_unplug_handler(dev);
			if (ret) {
				RTE_LOG(ERR, EAL, "Cannot handle hot-unplug "
					"for device (%s)\n", dev->name);
			}
			rte_spinlock_unlock(&failure_handle_lock);
		}
		rte_dev_event_callback_process(uevent.devname, uevent.type);
		free(uevent.devname);
	}

	return;

failure_handle_err:
	rte_spinlock_unlock(&failure_handle_lock);
	free(uevent.devname);
}

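/*
 * Start the device event monitor (reference counted): create the uevent
 * netlink socket and register dev_uev_handler() as its interrupt callback.
 * Callbacks registered with rte_dev_event_callback_register() are then
 * invoked from dev_uev_handler() when devices are added or removed.
 */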
int
rte_dev_event_monitor_start(void)
{
	int ret = 0;

	rte_rwlock_write_lock(&monitor_lock);

	if (monitor_refcount) {
		monitor_refcount++;
		goto exit;
	}

	intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
	if (intr_handle == NULL) {
		RTE_LOG(ERR, EAL, "Failed to allocate intr_handle\n");
		ret = -1;
		goto exit;
	}

	if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_DEV_EVENT)) {
		ret = -1;
		goto exit;
	}

	if (rte_intr_fd_set(intr_handle, -1)) {
		ret = -1;
		goto exit;
	}

	ret = dev_uev_socket_fd_create();
	if (ret) {
		RTE_LOG(ERR, EAL, "Failed to create device event fd.\n");
		goto exit;
	}

	ret = rte_intr_callback_register(intr_handle, dev_uev_handler, NULL);
	if (ret) {
		close(rte_intr_fd_get(intr_handle));
		goto exit;
	}

	monitor_refcount++;

exit:
	/* Keep the interrupt handle alive on success; it is used by the
	 * registered uevent callback and released in
	 * rte_dev_event_monitor_stop(). Release it only on failure.
	 */
	if (ret != 0) {
		rte_intr_instance_free(intr_handle);
		intr_handle = NULL;
	}
	rte_rwlock_write_unlock(&monitor_lock);
	return ret;
}

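/*
 * Stop the device event monitor: drop one reference and, when the last
 * reference goes away, unregister the uevent callback and release the
 * socket and the interrupt handle.
 */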
int
rte_dev_event_monitor_stop(void)
{
	int ret = 0;

	rte_rwlock_write_lock(&monitor_lock);

	if (!monitor_refcount) {
		RTE_LOG(ERR, EAL, "device event monitor already stopped\n");
		goto exit;
	}

	if (monitor_refcount > 1) {
		monitor_refcount--;
		goto exit;
	}

	ret = rte_intr_callback_unregister(intr_handle, dev_uev_handler,
					   (void *)-1);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Failed to unregister uevent callback.\n");
		goto exit;
	}

	close(rte_intr_fd_get(intr_handle));
	rte_intr_instance_free(intr_handle);
	intr_handle = NULL;

	monitor_refcount--;

exit:
	rte_rwlock_write_unlock(&monitor_lock);

	return ret;
}

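/* Install the hot-unplug SIGBUS handler, saving the previous action so it
 * can be restored by dev_sigbus_handler_unregister().
 */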
int
dev_sigbus_handler_register(void)
{
	sigset_t mask;
	struct sigaction action;

	rte_errno = 0;

	if (sigbus_need_recover)
		return 0;

	sigemptyset(&mask);
	sigaddset(&mask, SIGBUS);
	action.sa_flags = SA_SIGINFO;
	action.sa_mask = mask;
	action.sa_sigaction = sigbus_handler;
	sigbus_need_recover = !sigaction(SIGBUS, &action, &sigbus_action_old);

	return rte_errno;
}

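/* Restore the SIGBUS action saved by dev_sigbus_handler_register(). */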
int
dev_sigbus_handler_unregister(void)
{
	rte_errno = 0;

	sigbus_action_recover();

	return rte_errno;
}

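/*
 * Enable automatic handling of hot-unplug failures: install the SIGBUS
 * handler and let dev_uev_handler() invoke the bus hot_unplug_handler on
 * remove events.
 */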
int
rte_dev_hotplug_handle_enable(void)
{
	int ret = 0;

	ret = dev_sigbus_handler_register();
	if (ret < 0)
		RTE_LOG(ERR, EAL,
			"Failed to register SIGBUS handler for devices.\n");

	hotplug_handle = true;

	return ret;
}

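/* Disable automatic handling of hot-unplug failures and restore the original
 * SIGBUS action.
 */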
int
rte_dev_hotplug_handle_disable(void)
{
	int ret = 0;

	ret = dev_sigbus_handler_unregister();
	if (ret < 0)
		RTE_LOG(ERR, EAL,
			"Failed to unregister SIGBUS handler for devices.\n");

	hotplug_handle = false;

	return ret;
}