/*	$Id: nmi.c,v 1.6 2022/05/15 12:45:33 riastradh Exp $	*/

/*-
 * Copyright (c)2009,2011 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nmi.c,v 1.6 2022/05/15 12:45:33 riastradh Exp $");

/*
 * nmi dispatcher.
 *
 * XXX no need to be nmi-specific.
 * actual assumptions are:
 *	- dispatch() is called with preemption disabled.
 *	- handlers never block.
 *	- establish() and disestablish() are called within a thread context.
 *	  (thus can block)
 */
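
/*
 * Example usage (a sketch; mydriver_nmi(), mydriver_owns_nmi(),
 * mydriver_handle_nmi() and "sc" are hypothetical names, not part of
 * this interface):
 *
 *	static int
 *	mydriver_nmi(const struct trapframe *tf, void *arg)
 *	{
 *		struct mydriver_softc *sc = arg;
 *
 *		if (!mydriver_owns_nmi(sc))
 *			return 0;
 *		mydriver_handle_nmi(sc);
 *		return 1;
 *	}
 *
 *	handle = nmi_establish(mydriver_nmi, sc);
 *	...
 *	nmi_disestablish(handle);
 *
 * A handler returns nonzero iff it recognized and handled the NMI.
 */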

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/pserialize.h>

#include <x86/nmi.h>

struct nmi_handler {
	int (*n_func)(const struct trapframe *, void *); /* handler function */
	void *n_arg;			/* argument passed to n_func */
	struct nmi_handler *n_next;	/* next handler in the list */
};

static kmutex_t nmi_list_lock; /* serialize establish and disestablish */
static pserialize_t nmi_psz;
static nmi_handler_t *nmi_handlers; /* list of handlers */

/*
 * nmi_establish: establish an nmi handler
 *
 * => can block.
 * => returns an opaque handle.
 */

nmi_handler_t *
nmi_establish(int (*func)(const struct trapframe *, void *), void *arg)
{
	struct nmi_handler *n;

	n = kmem_alloc(sizeof(*n), KM_SLEEP);
	n->n_func = func;
	n->n_arg = arg;

	/*
	 * put it into the list.
	 */

	mutex_enter(&nmi_list_lock);
	n->n_next = nmi_handlers;
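	/*
	 * Publish the fully initialized node with a release store, pairing
	 * with the atomic_load_consume() of nmi_handlers in nmi_dispatch(),
	 * so that a CPU taking an NMI sees n_func, n_arg and n_next.
	 */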
	atomic_store_release(&nmi_handlers, n);
	mutex_exit(&nmi_list_lock);

	return n;
}

/*
 * nmi_disestablish: disestablish an nmi handler.
 *
 * => can block.
 * => takes an opaque handle.  it must be one returned by nmi_establish.
 */

void
nmi_disestablish(nmi_handler_t *handle)
{
	nmi_handler_t *n;
	nmi_handler_t **pp;

	KASSERT(handle != NULL);

	/*
	 * remove the handler from the list.
	 */

	mutex_enter(&nmi_list_lock);
	for (pp = &nmi_handlers, n = *pp; n != NULL; n = *pp) {
		if (n == handle) {
			break;
		}
		pp = &n->n_next;
	}
#if defined(DIAGNOSTIC)
	if (n == NULL) {
		mutex_exit(&nmi_list_lock);
		panic("%s: invalid handle %p", __func__, handle);
	}
#endif /* defined(DIAGNOSTIC) */
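	/*
	 * Unlink the handler.  Readers in nmi_dispatch() that still hold a
	 * pointer to it are waited for by pserialize_perform() below before
	 * the memory is freed.
	 */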
	atomic_store_relaxed(pp, n->n_next);
	mutex_exit(&nmi_list_lock); /* mutex_exit implies a store fence */

	/*
	 * before freeing 'n', ensure that no other cpus are
	 * in the middle of nmi_dispatch.
	 */

	pserialize_perform(nmi_psz);
	kmem_free(n, sizeof(*n));
}

/*
 * nmi_dispatch: dispatch an nmi.
 *
 * => called from interrupt context, thus with preemption disabled.
 */

int
nmi_dispatch(const struct trapframe *tf)
{
	const struct nmi_handler *n;
	int handled = 0;

	/*
	 * XXX abstraction violation
	 *
	 * we don't bother to call pserialize_read_enter/pserialize_read_exit
	 * because they are not necessary here: we are sure our IPL is
	 * higher than IPL_SOFTSERIAL.  better to avoid unnecessary calls
	 * since we are in a dangerous (NMI) context.
	 */

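	/*
	 * Lock-free traversal: the consume load of the list head pairs with
	 * the release store in nmi_establish(), and nodes are not freed
	 * until pserialize_perform() in nmi_disestablish() has completed,
	 * so the list can be walked without taking nmi_list_lock.
	 */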
	for (n = atomic_load_consume(&nmi_handlers);
	     n != NULL;
	     n = atomic_load_relaxed(&n->n_next)) {
		handled |= (*n->n_func)(tf, n->n_arg);
	}
	return handled;
}

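/*
 * nmi_init: initialize the dispatcher state.
 *
 * => must be called before nmi_establish()/nmi_disestablish().
 */
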
void
nmi_init(void)
{

	mutex_init(&nmi_list_lock, MUTEX_DEFAULT, IPL_NONE);
	nmi_psz = pserialize_create();
}
171