/*	$NetBSD: subr_interrupt.c,v 1.2 2017/06/01 02:45:13 chs Exp $	*/

/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_interrupt.c,v 1.2 2017/06/01 02:45:13 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/cpu.h>
#include <sys/interrupt.h>
#include <sys/intr.h>
#include <sys/kcpuset.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/xcall.h>
#include <sys/sysctl.h>

#include <sys/conf.h>
#include <sys/intrio.h>
#include <sys/kauth.h>

#include <machine/limits.h>

#ifdef INTR_DEBUG
#define DPRINTF(msg) printf msg
#else
#define DPRINTF(msg)
#endif

static struct intrio_set kintrio_set = { "\0", NULL, 0 };

#define UNSET_NOINTR_SHIELD	0
#define SET_NOINTR_SHIELD	1

static void
interrupt_shield_xcall(void *arg1, void *arg2)
{
	struct cpu_info *ci;
	struct schedstate_percpu *spc;
	int s, shield;

	ci = arg1;
	shield = (int)(intptr_t)arg2;
	spc = &ci->ci_schedstate;

	s = splsched();
	if (shield == UNSET_NOINTR_SHIELD)
		spc->spc_flags &= ~SPCF_NOINTR;
	else if (shield == SET_NOINTR_SHIELD)
		spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

/*
 * Change the SPCF_NOINTR flag in schedstate_percpu->spc_flags.
 */
static int
interrupt_shield(u_int cpu_idx, int shield)
{
	struct cpu_info *ci;
	struct schedstate_percpu *spc;

	KASSERT(mutex_owned(&cpu_lock));

	ci = cpu_lookup(cpu_idx);
	if (ci == NULL)
		return EINVAL;

	spc = &ci->ci_schedstate;
	if (shield == UNSET_NOINTR_SHIELD) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
	} else if (shield == SET_NOINTR_SHIELD) {
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
	}

	if (ci == curcpu() || !mp_online) {
		interrupt_shield_xcall(ci, (void *)(intptr_t)shield);
	} else {
		uint64_t where;
		where = xc_unicast(0, interrupt_shield_xcall, ci,
			(void *)(intptr_t)shield, ci);
		xc_wait(where);
	}

	spc->spc_lastmod = time_second;
	return 0;
}

/*
 * Move all interrupts assigned to "cpu_idx" to other CPUs if possible.
 * The destination CPU is the one with the lowest cpuid among the available
 * CPUs.  If no CPU is available, give up moving the interrupts.
 */
static int
interrupt_avert_intr(u_int cpu_idx)
{
	kcpuset_t *cpuset;
	struct intrids_handler *ii_handler;
	intrid_t *ids;
	int error, i, nids;

	kcpuset_create(&cpuset, true);
	kcpuset_set(cpuset, cpu_idx);

	ii_handler = interrupt_construct_intrids(cpuset);
	if (ii_handler == NULL) {
		error = ENOMEM;
		goto out;
	}
	nids = ii_handler->iih_nids;
	if (nids == 0) {
		error = 0;
		goto destruct_out;
	}

	interrupt_get_available(cpuset);
	kcpuset_clear(cpuset, cpu_idx);
	if (kcpuset_iszero(cpuset)) {
		DPRINTF(("%s: no available cpu\n", __func__));
		error = ENOENT;
		goto destruct_out;
	}

	ids = ii_handler->iih_intrids;
	for (i = 0; i < nids; i++) {
		error = interrupt_distribute_handler(ids[i], cpuset, NULL);
		if (error)
			break;
	}

 destruct_out:
	interrupt_destruct_intrids(ii_handler);
 out:
	kcpuset_destroy(cpuset);
	return error;
}

/*
 * Return the actual size of an intrio_list_line.
 * The intrio_list_line size varies with ncpu.
 */
static size_t
interrupt_intrio_list_line_size(void)
{

	return sizeof(struct intrio_list_line) +
		sizeof(struct intrio_list_line_cpu) * (ncpu - 1);
}

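/*
 * Illustrative layout of the list buffer filled in by interrupt_intrio_list()
 * below, assuming (purely as an example) ncpu == 2 and three interrupt ids:
 *
 *	struct intrio_list		header; il_lineoffset points past it
 *	struct intrio_list_line #0	ill_cpu[0], ill_cpu[1]
 *	struct intrio_list_line #1
 *	struct intrio_list_line #2
 *
 * Each line occupies interrupt_intrio_list_line_size() bytes: the base
 * structure plus (ncpu - 1) extra struct intrio_list_line_cpu slots.
 */
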
/*
 * Return the size of the interrupt list data on success.
 * Return 0 on failure.
 */
static size_t
interrupt_intrio_list_size(void)
{
	struct intrids_handler *ii_handler;
	size_t ilsize;

	ilsize = 0;

	/* buffer header */
	ilsize += sizeof(struct intrio_list);

	/* il_line body */
	ii_handler = interrupt_construct_intrids(kcpuset_running);
	if (ii_handler == NULL)
		return 0;
	ilsize += interrupt_intrio_list_line_size() * (ii_handler->iih_nids);

	interrupt_destruct_intrids(ii_handler);
	return ilsize;
}

/*
 * Write the intrctl list data to "il" and return the number of bytes used.
 * If an error occurs, return a negative errno.
 * If "il" == NULL, just return the number of bytes required.
 */
static int
interrupt_intrio_list(struct intrio_list *il, int length)
{
	struct intrio_list_line *illine;
	kcpuset_t *assigned, *avail;
	struct intrids_handler *ii_handler;
	intrid_t *ids;
	size_t ilsize;
	u_int cpu_idx;
	int nids, intr_idx, ret, line_size;

	ilsize = interrupt_intrio_list_size();
	if (ilsize == 0)
		return -ENOMEM;

	if (il == NULL)
		return ilsize;

	if (length < ilsize)
		return -ENOMEM;

	illine = (struct intrio_list_line *)
		((char *)il + sizeof(struct intrio_list));
	il->il_lineoffset = (off_t)((uintptr_t)illine - (uintptr_t)il);

	kcpuset_create(&avail, true);
	interrupt_get_available(avail);
	kcpuset_create(&assigned, true);

	ii_handler = interrupt_construct_intrids(kcpuset_running);
	if (ii_handler == NULL) {
		DPRINTF(("%s: interrupt_construct_intrids() failed\n",
			__func__));
		ret = -ENOMEM;
		goto out;
	}

	line_size = interrupt_intrio_list_line_size();
	/* Check that no interrupts were added after interrupt_intrio_list_size() sized the buffer. */
	nids = ii_handler->iih_nids;
	ids = ii_handler->iih_intrids;
	if (ilsize < sizeof(struct intrio_list) + line_size * nids) {
		DPRINTF(("%s: interrupts were added during execution.\n",
			__func__));
		ret = -ENOMEM;
		goto destruct_out;
	}

	for (intr_idx = 0; intr_idx < nids; intr_idx++) {
		char devname[INTRDEVNAMEBUF];

		strncpy(illine->ill_intrid, ids[intr_idx], INTRIDBUF);
		interrupt_get_devname(ids[intr_idx], devname, sizeof(devname));
		strncpy(illine->ill_xname, devname, INTRDEVNAMEBUF);

		interrupt_get_assigned(ids[intr_idx], assigned);
		for (cpu_idx = 0; cpu_idx < ncpu; cpu_idx++) {
			struct intrio_list_line_cpu *illcpu =
				&illine->ill_cpu[cpu_idx];

			illcpu->illc_assigned =
				kcpuset_isset(assigned, cpu_idx) ? true : false;
			illcpu->illc_count =
				interrupt_get_count(ids[intr_idx], cpu_idx);
		}

		illine = (struct intrio_list_line *)
			((char *)illine + line_size);
	}

	ret = ilsize;
	il->il_version = INTRIO_LIST_VERSION;
	il->il_ncpus = ncpu;
	il->il_nintrs = nids;
	il->il_linesize = line_size;
	il->il_bufsize = ilsize;

 destruct_out:
	interrupt_destruct_intrids(ii_handler);
 out:
	kcpuset_destroy(assigned);
	kcpuset_destroy(avail);

	return ret;
}

/*
 * "intrctl list" entry
 */
static int
interrupt_intrio_list_sysctl(SYSCTLFN_ARGS)
{
	int ret, error;
	void *buf;

	if (oldlenp == NULL)
		return EINVAL;

	/*
	 * If oldp == NULL, the sysctl(8) caller wants only the size of the
	 * intrctl list data.
	 */
	if (oldp == NULL) {
		ret = interrupt_intrio_list(NULL, 0);
		if (ret < 0)
			return -ret;

		*oldlenp = ret;
		return 0;
	}

	/*
	 * If oldp != NULL, the sysctl(8) caller wants both the size and the
	 * contents of the intrctl list data.
	 */
	if (*oldlenp == 0)
		return ENOMEM;

	buf = kmem_zalloc(*oldlenp, KM_SLEEP);
	ret = interrupt_intrio_list(buf, *oldlenp);
	if (ret < 0) {
		error = -ret;
		goto out;
	}
	error = copyout(buf, oldp, *oldlenp);

 out:
	kmem_free(buf, *oldlenp);
	return error;
}

/*
 * "intrctl affinity" entry
 */
static int
interrupt_set_affinity_sysctl(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct intrio_set *iset;
	cpuset_t *ucpuset;
	kcpuset_t *kcpuset;
	int error;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_INTR,
	    KAUTH_REQ_SYSTEM_INTR_AFFINITY, NULL, NULL, NULL);
	if (error)
		return EPERM;

	node = *rnode;
	iset = (struct intrio_set *)node.sysctl_data;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	ucpuset = iset->cpuset;
	kcpuset_create(&kcpuset, true);
	error = kcpuset_copyin(ucpuset, kcpuset, iset->cpuset_size);
	if (error)
		goto out;
	if (kcpuset_iszero(kcpuset)) {
		error = EINVAL;
		goto out;
	}

	error = interrupt_distribute_handler(iset->intrid, kcpuset, NULL);

 out:
	kcpuset_destroy(kcpuset);
	return error;
}

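/*
 * A minimal userland sketch (illustrative only, not compiled here) of how an
 * "intrctl affinity"-style request reaches interrupt_set_affinity_sysctl()
 * above.  It assumes the kern.intr.affinity node created in
 * sysctl_interrupt_setup() below and the cpuset(3)/sysctlbyname(3) userland
 * interfaces; the helper name is hypothetical.
 */
#if 0	/* userland example, not part of the kernel build */
#include <sys/sysctl.h>
#include <sys/intrio.h>
#include <sched.h>
#include <string.h>

static int
set_affinity_example(const char *intrid, cpuid_t cpu)
{
	struct intrio_set iset;
	cpuset_t *cset;
	int error;

	/* Build a one-CPU affinity set in user space. */
	cset = cpuset_create();
	if (cset == NULL)
		return -1;
	cpuset_zero(cset);
	cpuset_set(cpu, cset);

	/* The kernel copies the cpuset in via kcpuset_copyin(). */
	memset(&iset, 0, sizeof(iset));
	strlcpy(iset.intrid, intrid, sizeof(iset.intrid));
	iset.cpuset = cset;
	iset.cpuset_size = cpuset_size(cset);

	/* Writing the struct invokes the handler (requires privilege). */
	error = sysctlbyname("kern.intr.affinity", NULL, NULL,
	    &iset, sizeof(iset));
	cpuset_destroy(cset);
	return error;
}
#endif
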
/*
 * "intrctl intr" entry
 */
static int
interrupt_intr_sysctl(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct intrio_set *iset;
	cpuset_t *ucpuset;
	kcpuset_t *kcpuset;
	int error;
	u_int cpu_idx;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CPU,
	    KAUTH_REQ_SYSTEM_CPU_SETSTATE, NULL, NULL, NULL);
	if (error)
		return EPERM;

	node = *rnode;
	iset = (struct intrio_set *)node.sysctl_data;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	ucpuset = iset->cpuset;
	kcpuset_create(&kcpuset, true);
	error = kcpuset_copyin(ucpuset, kcpuset, iset->cpuset_size);
	if (error)
		goto out;
	if (kcpuset_iszero(kcpuset)) {
		error = EINVAL;
		goto out;
	}

	cpu_idx = kcpuset_ffs(kcpuset) - 1; /* support one CPU only */

	mutex_enter(&cpu_lock);
	error = interrupt_shield(cpu_idx, UNSET_NOINTR_SHIELD);
	mutex_exit(&cpu_lock);

 out:
	kcpuset_destroy(kcpuset);
	return error;
}

/*
 * "intrctl nointr" entry
 */
static int
interrupt_nointr_sysctl(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct intrio_set *iset;
	cpuset_t *ucpuset;
	kcpuset_t *kcpuset;
	int error;
	u_int cpu_idx;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_CPU,
	    KAUTH_REQ_SYSTEM_CPU_SETSTATE, NULL, NULL, NULL);
	if (error)
		return EPERM;

	node = *rnode;
	iset = (struct intrio_set *)node.sysctl_data;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error != 0 || newp == NULL)
		return error;

	ucpuset = iset->cpuset;
	kcpuset_create(&kcpuset, true);
	error = kcpuset_copyin(ucpuset, kcpuset, iset->cpuset_size);
	if (error)
		goto out;
	if (kcpuset_iszero(kcpuset)) {
		error = EINVAL;
		goto out;
	}

	cpu_idx = kcpuset_ffs(kcpuset) - 1; /* support one CPU only */

	mutex_enter(&cpu_lock);
	error = interrupt_shield(cpu_idx, SET_NOINTR_SHIELD);
	mutex_exit(&cpu_lock);
	if (error)
		goto out;

	error = interrupt_avert_intr(cpu_idx);

 out:
	kcpuset_destroy(kcpuset);
	return error;
}

SYSCTL_SETUP(sysctl_interrupt_setup, "sysctl interrupt setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
		       CTLFLAG_PERMANENT, CTLTYPE_NODE,
		       "intr", SYSCTL_DESCR("Interrupt options"),
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &node, NULL,
		       CTLFLAG_PERMANENT, CTLTYPE_STRUCT,
		       "list", SYSCTL_DESCR("intrctl list"),
		       interrupt_intrio_list_sysctl, 0, NULL,
		       0, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &node, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_STRUCT,
		       "affinity", SYSCTL_DESCR("set affinity"),
		       interrupt_set_affinity_sysctl, 0, &kintrio_set,
		       sizeof(kintrio_set), CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &node, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_STRUCT,
		       "intr", SYSCTL_DESCR("set intr"),
		       interrupt_intr_sysctl, 0, &kintrio_set,
		       sizeof(kintrio_set), CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &node, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_STRUCT,
		       "nointr", SYSCTL_DESCR("set nointr"),
		       interrupt_nointr_sysctl, 0, &kintrio_set,
		       sizeof(kintrio_set), CTL_CREATE, CTL_EOL);
}
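
/*
 * A minimal userland sketch (illustrative only, not compiled here) of the
 * two-pass query that interrupt_intrio_list_sysctl() above is written to
 * serve, assuming the kern.intr.list node created in sysctl_interrupt_setup()
 * and the sysctlbyname(3) interface; the helper name is hypothetical.
 */
#if 0	/* userland example, not part of the kernel build */
#include <sys/sysctl.h>
#include <sys/intrio.h>
#include <stdio.h>
#include <stdlib.h>

static int
print_intr_list_example(void)
{
	struct intrio_list *il;
	struct intrio_list_line *illine;
	size_t len;
	int i;

	/* First pass: oldp == NULL asks only for the required size. */
	if (sysctlbyname("kern.intr.list", NULL, &len, NULL, 0) == -1)
		return -1;

	il = malloc(len);
	if (il == NULL)
		return -1;

	/* Second pass: fetch the header plus il_nintrs variable-size lines. */
	if (sysctlbyname("kern.intr.list", il, &len, NULL, 0) == -1) {
		free(il);
		return -1;
	}

	illine = (struct intrio_list_line *)((char *)il + il->il_lineoffset);
	for (i = 0; i < il->il_nintrs; i++) {
		printf("%s (%s)\n", illine->ill_intrid, illine->ill_xname);
		illine = (struct intrio_list_line *)
		    ((char *)illine + il->il_linesize);
	}
	free(il);
	return 0;
}
#endif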