/* $NetBSD: cpu.c,v 1.70 2012/03/03 21:15:15 reinoud Exp $ */

/*-
 * Copyright (c) 2007 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cpu.h"
#include "opt_hz.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.70 2012/03/03 21:15:15 reinoud Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/lwp.h>
#include <sys/cpu.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/mount.h>

#include <dev/cons.h>

#include <machine/cpu.h>
#include <machine/mainbus.h>
#include <machine/pcb.h>
#include <machine/machdep.h>
#include <machine/thunk.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

#if __GNUC_PREREQ__(4,4)
#define cpu_unreachable()	__builtin_unreachable()
#else
#define cpu_unreachable()	do { thunk_abort(); } while (0)
#endif

static int	cpu_match(device_t, cfdata_t, void *);
static void	cpu_attach(device_t, device_t, void *);

struct cpu_info cpu_info_primary = {
	.ci_dev = 0,
	.ci_self = &cpu_info_primary,
	.ci_idepth = -1,
	.ci_curlwp = &lwp0,
};

char cpu_model[48] = "virtual processor";

typedef struct cpu_softc {
	device_t	sc_dev;
	struct cpu_info	*sc_ci;

	ucontext_t	sc_ucp;
	uint8_t		sc_ucp_stack[PAGE_SIZE];
} cpu_softc_t;


/* statics */
static struct pcb lwp0pcb;
static void *um_msgbuf;


/* attachment */
CFATTACH_DECL_NEW(cpu, sizeof(cpu_softc_t), cpu_match, cpu_attach, NULL, NULL);

static int
cpu_match(device_t parent, cfdata_t match, void *opaque)
{
	struct thunkbus_attach_args *taa = opaque;

	if (taa->taa_type != THUNKBUS_TYPE_CPU)
		return 0;

	return 1;
}

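/*
 * Attach the (single) virtual cpu.  Besides hooking up cpu_info_primary,
 * prime a private ucontext (sc_ucp) that runs on its own small stack with
 * SIGALRM and SIGIO masked; cpu_switchto() below re-targets this context
 * for every lwp switch.
 */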
static void
cpu_attach(device_t parent, device_t self, void *opaque)
{
	cpu_softc_t *sc = device_private(self);

	aprint_naive("\n");
	aprint_normal("\n");

	cpu_info_primary.ci_dev = self;
	sc->sc_dev = self;
	sc->sc_ci = &cpu_info_primary;

	thunk_getcontext(&sc->sc_ucp);
	sc->sc_ucp.uc_stack.ss_sp = sc->sc_ucp_stack;
	sc->sc_ucp.uc_stack.ss_size = PAGE_SIZE - sizeof(register_t);
	sc->sc_ucp.uc_flags = _UC_STACK | _UC_CPU | _UC_SIGMASK;
	thunk_sigaddset(&sc->sc_ucp.uc_sigmask, SIGALRM);
	thunk_sigaddset(&sc->sc_ucp.uc_sigmask, SIGIO);
}

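/*
 * Find and attach the device tree hanging off mainbus, then lower the spl
 * so interrupts can be delivered.
 */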
void
cpu_configure(void)
{
	if (config_rootfound("mainbus", NULL) == NULL)
		panic("configure: mainbus not configured");

	spl0();
}


/* main guts */
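/*
 * Shut the machine down.  In this port RB_POWERDOWN simply exits the host
 * process, RB_DUMP aborts it (leaving a host core dump), and everything
 * else ends in usermode_reboot().
 */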
void
cpu_reboot(int howto, char *bootstr)
{
	extern void usermode_reboot(void);

	if (cold)
		howto |= RB_HALT;

	if ((howto & RB_NOSYNC) == 0)
		vfs_shutdown();
	else
		suspendsched();

	doshutdownhooks();
	pmf_system_shutdown(boothowto);

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN)
		thunk_exit(0);

	splhigh();

	if (howto & RB_DUMP)
		thunk_abort();

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cnpollc(1);
		cngetc();
		cnpollc(0);
	}

	printf("rebooting...\n");

	usermode_reboot();

	/* NOTREACHED */
	cpu_unreachable();
}

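/* Note the reschedule request and post an AST to the cpu. */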
void
cpu_need_resched(struct cpu_info *ci, int flags)
{
	ci->ci_want_resched |= flags;
	aston(ci);
}

void
cpu_need_proftick(struct lwp *l)
{
}

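/*
 * Second half of cpu_switchto(): entered on the cpu softc's private stack
 * so that neither the old nor the new lwp's stack is live while errno and
 * curlwp are handed over.  It finishes by jumping into the new lwp's
 * saved context and therefore never returns.
 */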
static void
cpu_switchto_atomic(lwp_t *oldlwp, lwp_t *newlwp)
{
	struct pcb *oldpcb = oldlwp ? lwp_getpcb(oldlwp) : NULL;
	struct pcb *newpcb = lwp_getpcb(newlwp);
	struct cpu_info *ci = curcpu();

	ci->ci_stash = oldlwp;

	if (oldpcb)
		oldpcb->pcb_errno = thunk_geterrno();

	thunk_seterrno(newpcb->pcb_errno);

	curlwp = newlwp;
	if (thunk_setcontext(&newpcb->pcb_ucp))
		panic("setcontext failed");
	/* not reached */
}

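/*
 * Switch lwps by swapping ucontexts.  The outgoing context is saved into
 * the old lwp's pcb while control moves to cpu_switchto_atomic() on the
 * per-cpu scratch stack; when the old lwp is later resumed it continues
 * right after the swap and returns the previously running lwp (ci_stash).
 */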
lwp_t *
cpu_switchto(lwp_t *oldlwp, lwp_t *newlwp, bool returning)
{
	struct pcb *oldpcb = oldlwp ? lwp_getpcb(oldlwp) : NULL;
	struct pcb *newpcb = lwp_getpcb(newlwp);
	struct cpu_info *ci = curcpu();
	cpu_softc_t *sc = device_private(ci->ci_dev);

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_switchto [%s,pid=%d,lid=%d] -> [%s,pid=%d,lid=%d]\n",
	    oldlwp ? oldlwp->l_name : "none",
	    oldlwp ? oldlwp->l_proc->p_pid : -1,
	    oldlwp ? oldlwp->l_lid : -1,
	    newlwp ? newlwp->l_name : "none",
	    newlwp ? newlwp->l_proc->p_pid : -1,
	    newlwp ? newlwp->l_lid : -1);
	if (oldpcb) {
		thunk_printf_debug("    oldpcb uc_link=%p, uc_stack.ss_sp=%p, "
		    "uc_stack.ss_size=%d\n",
		    oldpcb->pcb_ucp.uc_link,
		    oldpcb->pcb_ucp.uc_stack.ss_sp,
		    (int)oldpcb->pcb_ucp.uc_stack.ss_size);
	}
	if (newpcb) {
		thunk_printf_debug("    newpcb uc_link=%p, uc_stack.ss_sp=%p, "
		    "uc_stack.ss_size=%d\n",
		    newpcb->pcb_ucp.uc_link,
		    newpcb->pcb_ucp.uc_stack.ss_sp,
		    (int)newpcb->pcb_ucp.uc_stack.ss_size);
	}
#endif /* CPU_DEBUG */

	KASSERT(sc);

	/* create atomic switcher */
	thunk_makecontext(&sc->sc_ucp, (void (*)(void)) cpu_switchto_atomic,
			2, oldlwp, newlwp, NULL, NULL);

	if (oldpcb) {
		thunk_swapcontext(&oldpcb->pcb_ucp, &sc->sc_ucp);
		/* returns here */
	} else {
		thunk_setcontext(&sc->sc_ucp);
		/* never returns */
	}

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_switchto: returning %p (was %p)\n", ci->ci_stash, oldlwp);
#endif
	return ci->ci_stash;
}

void
cpu_dumpconf(void)
{
#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_dumpconf\n");
#endif
}

void
cpu_signotify(struct lwp *l)
{
}

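/*
 * The machine context of an lwp lives in the "userret" ucontext kept in
 * its pcb; get/set just copy the mcontext in and out of it.
 */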
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	struct pcb *pcb = lwp_getpcb(l);
	ucontext_t *ucp = &pcb->pcb_userret_ucp;

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_getmcontext\n");
#endif
	memcpy(mcp, &ucp->uc_mcontext, sizeof(mcontext_t));
	return;
}

int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct pcb *pcb = lwp_getpcb(l);
	ucontext_t *ucp = &pcb->pcb_userret_ucp;

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_setmcontext\n");
#endif
	memcpy(&ucp->uc_mcontext, mcp, sizeof(mcontext_t));
	return 0;
}

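/*
 * Nothing to run: idle in the host via thunk_idle(), unless a reschedule
 * is already pending.
 */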
void
cpu_idle(void)
{
	struct cpu_info *ci = curcpu();

	if (ci->ci_want_resched)
		return;

	thunk_idle();
}

void
cpu_lwp_free(struct lwp *l, int proc)
{
#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_free (dummy)\n");
#endif
}

void
cpu_lwp_free2(struct lwp *l)
{
	struct pcb *pcb = lwp_getpcb(l);

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_free2\n");
#endif

	if (pcb == NULL)
		return;
	/* XXX nothing to do? */
}

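/*
 * First code executed by a freshly forked lwp (set up in cpu_lwp_fork()
 * below): finish the switch with lwp_startup(), then reuse the same
 * ucontext to jump to the real entry point func(arg).
 */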
static void
cpu_lwp_trampoline(ucontext_t *ucp, void (*func)(void *), void *arg)
{
#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_trampoline called with func %p, arg %p\n", (void *) func, arg);
#endif
	/* init lwp */
	lwp_startup(curcpu()->ci_stash, curlwp);

	/* actual jump */
	thunk_makecontext(ucp, (void (*)(void)) func, 1, arg, NULL, NULL, NULL);
	thunk_setcontext(ucp);
}

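/*
 * Prepare a new lwp: copy l1's pcb into l2, give l2 its own system stack
 * and a fresh ucontext, and aim that context at cpu_lwp_trampoline() with
 * func/arg; uc_link is pointed at the userret context kept in the pcb.
 */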
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1 = lwp_getpcb(l1);
	struct pcb *pcb2 = lwp_getpcb(l2);

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_fork [%s/%p] -> [%s/%p] stack=%p stacksize=%d\n",
	    l1 ? l1->l_name : "none", l1,
	    l2 ? l2->l_name : "none", l2,
	    stack, (int)stacksize);
#endif

	if (stack)
		panic("%s: stack passed, can't handle\n", __func__);

	/* copy the PCB and its switchframes from parent */
	memcpy(pcb2, pcb1, sizeof(struct pcb));

	/* refresh context */
	if (thunk_getcontext(&pcb2->pcb_ucp))
		panic("getcontext failed");

	/* recalculate the system stack top */
	pcb2->sys_stack_top = pcb2->sys_stack + TRAPSTACKSIZE;

	/* get l2 its own stack */
	pcb2->pcb_ucp.uc_stack.ss_sp = pcb2->sys_stack;
	pcb2->pcb_ucp.uc_stack.ss_size = pcb2->sys_stack_top - pcb2->sys_stack;
	pcb2->pcb_ucp.uc_link = &pcb2->pcb_userret_ucp;

	thunk_sigemptyset(&pcb2->pcb_ucp.uc_sigmask);
	pcb2->pcb_ucp.uc_flags = _UC_STACK | _UC_CPU | _UC_SIGMASK;
	thunk_makecontext(&pcb2->pcb_ucp,
	    (void (*)(void)) cpu_lwp_trampoline,
	    3, &pcb2->pcb_ucp, func, arg, NULL);
}

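/* Arm the host interval timer that provides the HZ clock interrupt. */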
void
cpu_initclocks(void)
{
	extern timer_t clock_timerid;

	thunk_timer_start(clock_timerid, HZ);
}

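/*
 * Early machine dependent startup: allocate the message buffer and the
 * physio submap, print the banner, and initialise lwp0's pcb from the
 * current host context.
 */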
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	size_t msgbufsize = 32 * 1024;

	/* get ourself a message buffer */
	um_msgbuf = kmem_zalloc(msgbufsize, KM_SLEEP);
	if (um_msgbuf == NULL)
		panic("couldn't allocate msgbuf");
	initmsgbuf(um_msgbuf, msgbufsize);

	/* allocate a submap for physio, 1Mb enough? */
	minaddr = 0;
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   1024 * 1024, 0, false, NULL);

	/* say hi! */
	banner();

	/* init lwp0 */
	memset(&lwp0pcb, 0, sizeof(lwp0pcb));
	thunk_getcontext(&lwp0pcb.pcb_ucp);
	thunk_sigemptyset(&lwp0pcb.pcb_ucp.uc_sigmask);
	lwp0pcb.pcb_ucp.uc_flags = _UC_STACK | _UC_CPU | _UC_SIGMASK;

	uvm_lwp_setuarea(&lwp0, (vaddr_t) &lwp0pcb);
	memcpy(&lwp0pcb.pcb_userret_ucp, &lwp0pcb.pcb_ucp, sizeof(ucontext_t));

	/* set stack top */
	lwp0pcb.sys_stack_top = lwp0pcb.sys_stack + TRAPSTACKSIZE;
}

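/*
 * Select the root device: use an explicitly configured root device
 * (usermode_root_device) when set, otherwise prefer ld0 and fall back
 * to md0.
 */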
void
cpu_rootconf(void)
{
	extern char *usermode_root_device;
	device_t rdev;

	if (usermode_root_device != NULL) {
		rdev = device_find_by_xname(usermode_root_device);
	} else {
		rdev = device_find_by_xname("ld0");
		if (rdev == NULL)
			rdev = device_find_by_xname("md0");
	}

	aprint_normal("boot device: %s\n",
	    rdev ? device_xname(rdev) : "<unknown>");
	setroot(rdev, 0);
}

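/*
 * Report whether we are running in (soft)interrupt context, i.e. whether
 * the current cpu's interrupt depth is non-negative.
 */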
bool
cpu_intr_p(void)
{
	int idepth;

	kpreempt_disable();
	idepth = curcpu()->ci_idepth;
	kpreempt_enable();

	return (idepth >= 0);
}
458