xref: /netbsd-src/sys/arch/usermode/dev/cpu.c (revision deb6f0161a9109e7de9b519dc8dfb9478668dcdd)
/* $NetBSD: cpu.c,v 1.80 2018/06/01 07:26:15 reinoud Exp $ */

/*-
 * Copyright (c) 2007 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cpu.h"
#include "opt_hz.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.80 2018/06/01 07:26:15 reinoud Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/lwp.h>
#include <sys/cpu.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/mount.h>

#include <dev/cons.h>

#include <machine/cpu.h>
#include <machine/mainbus.h>
#include <machine/pcb.h>
#include <machine/machdep.h>
#include <machine/thunk.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_page.h>

#if __GNUC_PREREQ__(4,4)
#define cpu_unreachable()	__builtin_unreachable()
#else
#define cpu_unreachable()	do { thunk_abort(); } while (0)
#endif

static int	cpu_match(device_t, cfdata_t, void *);
static void	cpu_attach(device_t, device_t, void *);

/* XXX */
//extern void *_lwp_getprivate(void);
//extern int _lwp_setprivate(void *);


struct cpu_info cpu_info_primary = {
	.ci_dev = 0,
	.ci_self = &cpu_info_primary,
	.ci_idepth = -1,
	.ci_curlwp = &lwp0,
};

typedef struct cpu_softc {
	device_t	sc_dev;
	struct cpu_info	*sc_ci;

	ucontext_t	sc_ucp;
	uint8_t		sc_ucp_stack[PAGE_SIZE];
} cpu_softc_t;


/* statics */
static struct pcb lwp0pcb;
static void *um_msgbuf;


/* attachment */
CFATTACH_DECL_NEW(cpu, sizeof(cpu_softc_t), cpu_match, cpu_attach, NULL, NULL);

static int
cpu_match(device_t parent, cfdata_t match, void *opaque)
{
	struct thunkbus_attach_args *taa = opaque;

	if (taa->taa_type != THUNKBUS_TYPE_CPU)
		return 0;

	return 1;
}

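/*
 * Attach the (single) virtual CPU.  Besides wiring up cpu_info_primary,
 * this records a private host ucontext in the softc, running on its own
 * small stack; cpu_switchto() later uses it to run cpu_switchto_atomic().
 * SIGALRM, SIGIO, SIGINT and SIGTSTP are masked in that context,
 * presumably so the host signals that drive interrupts cannot fire in the
 * middle of a switch.
 */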
static void
cpu_attach(device_t parent, device_t self, void *opaque)
{
	cpu_softc_t *sc = device_private(self);

	aprint_naive("\n");
	aprint_normal("\n");

	cpu_info_primary.ci_dev = self;
	sc->sc_dev = self;
	sc->sc_ci = &cpu_info_primary;

	thunk_getcontext(&sc->sc_ucp);
	sc->sc_ucp.uc_stack.ss_sp = sc->sc_ucp_stack;
	sc->sc_ucp.uc_stack.ss_size = PAGE_SIZE - sizeof(register_t);
	sc->sc_ucp.uc_flags = _UC_STACK | _UC_CPU | _UC_SIGMASK;
	thunk_sigaddset(&sc->sc_ucp.uc_sigmask, SIGALRM);
	thunk_sigaddset(&sc->sc_ucp.uc_sigmask, SIGIO);
	thunk_sigaddset(&sc->sc_ucp.uc_sigmask, SIGINT);
	thunk_sigaddset(&sc->sc_ucp.uc_sigmask, SIGTSTP);
}

void
cpu_configure(void)
{
	cpu_setmodel("virtual processor");
	if (config_rootfound("mainbus", NULL) == NULL)
		panic("configure: mainbus not configured");

	spl0();
}


/* main guts */
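/*
 * Halt or restart the emulated machine.  The usual MI steps apply: sync
 * the file systems unless RB_NOSYNC, run the shutdown hooks, and then
 * either exit the host process (RB_POWERDOWN), abort so the host leaves a
 * core dump behind (RB_DUMP), wait for a key press (RB_HALT), or restart
 * via usermode_reboot().
 */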
void
cpu_reboot(int howto, char *bootstr)
{
	extern void usermode_reboot(void);

	if (cold)
		howto |= RB_HALT;

	if ((howto & RB_NOSYNC) == 0)
		vfs_shutdown();
	else
		suspendsched();

	doshutdownhooks();
	pmf_system_shutdown(boothowto);

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN)
		thunk_exit(0);

	splhigh();

	if (howto & RB_DUMP)
		thunk_abort();

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cnpollc(1);
		cngetc();
		cnpollc(0);
	}

	printf("rebooting...\n");

	usermode_reboot();

	/* NOTREACHED */
	cpu_unreachable();
}

void
cpu_need_resched(struct cpu_info *ci, int flags)
{
	ci->ci_want_resched |= flags;
	aston(ci);
}

void
cpu_need_proftick(struct lwp *l)
{
}

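/*
 * Record the TLS base for an lwp.  The base is stored in both of the
 * pcb's host ucontexts (the regular one and the userret one) so that it
 * is in effect whichever of the two ends up being resumed.
 */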
int
cpu_lwp_setprivate(lwp_t *l, void *ptr)
{
	struct pcb *pcb = lwp_getpcb(l);

	/* set both ucontexts up for TLS just in case */
	pcb->pcb_ucp.uc_mcontext._mc_tlsbase =
		(uintptr_t) ptr;
	pcb->pcb_ucp.uc_flags |= _UC_TLSBASE;

	pcb->pcb_userret_ucp.uc_mcontext._mc_tlsbase =
		(uintptr_t) ptr;
	pcb->pcb_userret_ucp.uc_flags |= _UC_TLSBASE;

	return 0;
}

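/*
 * Second half of a context switch.  This runs on the per-CPU switcher
 * stack set up in cpu_attach(), with the relevant host signals blocked,
 * so the hand-over from the old lwp to the new one cannot be interrupted.
 * It stashes the outgoing lwp, swaps the host errno kept in the pcb,
 * installs the new curlwp and finally setcontext()s into the new lwp's
 * saved ucontext; it never returns.
 */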
static
void
cpu_switchto_atomic(lwp_t *oldlwp, lwp_t *newlwp)
{
	struct pcb *oldpcb;
	struct pcb *newpcb;
	struct cpu_info *ci;
	int s;

	oldpcb = oldlwp ? lwp_getpcb(oldlwp) : NULL;
	newpcb = lwp_getpcb(newlwp);
	ci = curcpu();

	s = splhigh();

	ci->ci_stash = oldlwp;
	if (oldpcb)
		oldpcb->pcb_errno = thunk_geterrno();

	thunk_seterrno(newpcb->pcb_errno);
	curlwp = newlwp;

	splx(s);

	if (thunk_setcontext(&newpcb->pcb_ucp))
		panic("setcontext failed");

	/* not reached */
}

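/*
 * MI entry point for a context switch.  The switch is done in two steps:
 * first hop onto the cpu softc's private "switcher" context, so we are no
 * longer running on either lwp's stack, then let cpu_switchto_atomic()
 * complete the hand-over.  When the old lwp is eventually resumed,
 * execution continues after thunk_swapcontext() and the lwp stashed by
 * the switcher is returned to the caller.
 */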
lwp_t *
cpu_switchto(lwp_t *oldlwp, lwp_t *newlwp, bool returning)
{
	struct pcb *oldpcb = oldlwp ? lwp_getpcb(oldlwp) : NULL;
	struct pcb *newpcb = lwp_getpcb(newlwp);
	struct cpu_info *ci = curcpu();
	cpu_softc_t *sc = device_private(ci->ci_dev);

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_switchto [%s,pid=%d,lid=%d] -> [%s,pid=%d,lid=%d]\n",
	    oldlwp ? oldlwp->l_name : "none",
	    oldlwp ? oldlwp->l_proc->p_pid : -1,
	    oldlwp ? oldlwp->l_lid : -1,
	    newlwp ? newlwp->l_name : "none",
	    newlwp ? newlwp->l_proc->p_pid : -1,
	    newlwp ? newlwp->l_lid : -1);
	if (oldpcb) {
		thunk_printf_debug("    oldpcb uc_link=%p, uc_stack.ss_sp=%p, "
		    "uc_stack.ss_size=%d, l_private %p, uc_mcontext._mc_tlsbase=%p(%s)\n",
		    oldpcb->pcb_ucp.uc_link,
		    oldpcb->pcb_ucp.uc_stack.ss_sp,
		    (int)oldpcb->pcb_ucp.uc_stack.ss_size,
		    (void *) oldlwp->l_private,
		    (void *) oldpcb->pcb_ucp.uc_mcontext._mc_tlsbase,
		    oldpcb->pcb_ucp.uc_flags & _UC_TLSBASE ? "ON" : "off");
	}
	if (newpcb) {
		thunk_printf_debug("    newpcb uc_link=%p, uc_stack.ss_sp=%p, "
		    "uc_stack.ss_size=%d, l_private %p, uc_mcontext._mc_tlsbase=%p(%s)\n",
		    newpcb->pcb_ucp.uc_link,
		    newpcb->pcb_ucp.uc_stack.ss_sp,
		    (int)newpcb->pcb_ucp.uc_stack.ss_size,
		    (void *) newlwp->l_private,
		    (void *) newpcb->pcb_ucp.uc_mcontext._mc_tlsbase,
		    newpcb->pcb_ucp.uc_flags & _UC_TLSBASE ? "ON" : "off");
	}
#endif /* CPU_DEBUG */

	/* create atomic switcher */
	KASSERT(sc);
	KASSERT(newlwp);
	thunk_makecontext(&sc->sc_ucp, (void (*)(void)) cpu_switchto_atomic,
			2, oldlwp, newlwp, NULL, NULL);
	if (oldpcb) {
		thunk_swapcontext(&oldpcb->pcb_ucp, &sc->sc_ucp);
		/* returns here */
	} else {
		thunk_setcontext(&sc->sc_ucp);
		/* never returns */
	}

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_switchto: returning %p (was %p)\n", ci->ci_stash, oldlwp);
#endif
	return ci->ci_stash;
}

void
cpu_dumpconf(void)
{
#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_dumpconf\n");
#endif
}

void
cpu_signotify(struct lwp *l)
{
}

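/*
 * Fetch the machine context of an lwp for the MI signal/ucontext code.
 * The context handed out is the saved "userret" host ucontext, with the
 * TLS base taken from l_private; the CPU, FPU and TLSBASE register sets
 * are reported as valid.
 */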
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	struct pcb *pcb = lwp_getpcb(l);
	ucontext_t *ucp = &pcb->pcb_userret_ucp;

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_getmcontext\n");
#endif
	memcpy(mcp, &ucp->uc_mcontext, sizeof(mcontext_t));

	/* report that we have the CPU, FPU and TLSBASE registers */
	mcp->_mc_tlsbase = (uintptr_t) l->l_private;
	*flags = _UC_CPU | _UC_FPU | _UC_TLSBASE;

	return;
}

int
cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
{
	/*
	 * Can we check here, or should that be done in the
	 * target-specific places?
	 */
	/* XXX NO CHECKING! XXX */
#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_mcontext_validate\n");
#endif
	return 0;
}

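/*
 * Install a machine context supplied by the MI code.  Only the register
 * sets whose flags are present are copied into the userret ucontext; the
 * TLS base is routed through lwp_setprivate() so both pcb ucontexts stay
 * in sync.  The _UC_SETSTACK/_UC_CLRSTACK handling is deliberately left
 * out (see the #if 0 block below), as signals are delivered differently
 * on usermode.
 */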
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct pcb *pcb = lwp_getpcb(l);
	ucontext_t *ucp = &pcb->pcb_userret_ucp;

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_setmcontext\n");
#endif
	if ((flags & _UC_CPU) != 0)
		memcpy(&ucp->uc_mcontext.__gregs, &mcp->__gregs, sizeof(__gregset_t));
	if ((flags & _UC_FPU) != 0)
		memcpy(&ucp->uc_mcontext.__fpregs, &mcp->__fpregs, sizeof(__fpregset_t));
	if ((flags & _UC_TLSBASE) != 0)
		lwp_setprivate(l, (void *) (uintptr_t) mcp->_mc_tlsbase);

#if 0
	/*
	 * XXX we ignore the set and clear stack since signals are done
	 * slightly differently.
	 */
	thunk_printf("%s: flags %x\n", __func__, flags);
	mutex_enter(l->l_proc->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(l->l_proc->p_lock);
#endif

	ucp->uc_flags |= (flags & (_UC_CPU | _UC_FPU | _UC_TLSBASE));

	return 0;
}

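/*
 * Idle loop body: unless a reschedule is already pending, yield to the
 * host via thunk_idle().
 */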
void
cpu_idle(void)
{
	struct cpu_info *ci = curcpu();

	if (ci->ci_want_resched)
		return;

	thunk_idle();
}

void
cpu_lwp_free(struct lwp *l, int proc)
{
#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_free (dummy)\n");
#endif
}

void
cpu_lwp_free2(struct lwp *l)
{
	struct pcb *pcb = lwp_getpcb(l);

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_free2\n");
#endif

	if (pcb == NULL)
		return;
	/* XXX nothing to do? */
}

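/*
 * First code a new lwp created by cpu_lwp_fork() runs.  It finishes the
 * switch bookkeeping with lwp_startup() and then rewrites the very
 * ucontext it is running on so that a plain setcontext() jumps into the
 * requested function with its argument.
 */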
static void
cpu_lwp_trampoline(ucontext_t *ucp, void (*func)(void *), void *arg)
{
#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_trampoline called with func %p, arg %p\n", (void *) func, arg);
#endif
	/* init lwp */
	lwp_startup(curcpu()->ci_stash, curlwp);

	/* actual jump */
	thunk_makecontext(ucp, (void (*)(void)) func, 1, arg, NULL, NULL, NULL);
	thunk_setcontext(ucp);
}

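/*
 * Prepare a newly created lwp for its first run.  The child inherits a
 * copy of the parent's pcb, gets its own system stack and TLS base, and
 * has its ucontext pointed at cpu_lwp_trampoline() so that the first
 * switch to it ends up in func(arg).  Passing an alternate stack is not
 * supported here.
 */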
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1 = lwp_getpcb(l1);
	struct pcb *pcb2 = lwp_getpcb(l2);

#ifdef CPU_DEBUG
	thunk_printf_debug("cpu_lwp_fork [%s/%p] -> [%s/%p] stack=%p stacksize=%d\n",
	    l1 ? l1->l_name : "none", l1,
	    l2 ? l2->l_name : "none", l2,
	    stack, (int)stacksize);
#endif
	if (stack)
		panic("%s: stack passed, can't handle\n", __func__);

	/* copy the PCB and its switchframes from parent */
	memcpy(pcb2, pcb1, sizeof(struct pcb));

	/* refresh context, XXX needed? */
	if (thunk_getcontext(&pcb2->pcb_ucp))
		panic("getcontext failed");

	/* set up for TLS */
	pcb2->pcb_ucp.uc_mcontext._mc_tlsbase = (intptr_t) l2->l_private;
	pcb2->pcb_ucp.uc_flags |= _UC_TLSBASE;

	/* recalculate the system stack top */
	pcb2->sys_stack_top = pcb2->sys_stack + TRAPSTACKSIZE;

	/* get l2 its own stack */
	pcb2->pcb_ucp.uc_stack.ss_sp = pcb2->sys_stack;
	pcb2->pcb_ucp.uc_stack.ss_size = pcb2->sys_stack_top - pcb2->sys_stack;
	pcb2->pcb_ucp.uc_link = &pcb2->pcb_userret_ucp;

	thunk_sigemptyset(&pcb2->pcb_ucp.uc_sigmask);

	thunk_makecontext(&pcb2->pcb_ucp,
	    (void (*)(void)) cpu_lwp_trampoline,
	    3, &pcb2->pcb_ucp, func, arg, NULL);
}

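/*
 * Start the periodic host timer (clock_timerid) at HZ ticks per second;
 * its expirations presumably provide the kernel's clock interrupt.
 */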
void
cpu_initclocks(void)
{
	extern timer_t clock_timerid;

	thunk_timer_start(clock_timerid, HZ);
}

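/*
 * Late MD start-up: allocate and install the kernel message buffer, carve
 * out a 1 MB submap for physio transfers, print the banner, and give
 * lwp0's pcb a fresh host ucontext so the very first context switch has
 * something to switch away from.
 */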
void
cpu_startup(void)
{
	vaddr_t minaddr, maxaddr;
	size_t msgbufsize = 32 * 1024;

	/* get ourselves a message buffer */
	um_msgbuf = kmem_zalloc(msgbufsize, KM_SLEEP);
	initmsgbuf(um_msgbuf, msgbufsize);

	/* allocate a submap for physio; is 1 MB enough? */
	minaddr = 0;
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   1024 * 1024, 0, false, NULL);

	/* say hi! */
	banner();

	/* init lwp0 */
	memset(&lwp0pcb, 0, sizeof(lwp0pcb));
	thunk_getcontext(&lwp0pcb.pcb_ucp);
	thunk_sigemptyset(&lwp0pcb.pcb_ucp.uc_sigmask);
	lwp0pcb.pcb_ucp.uc_flags = _UC_STACK | _UC_CPU | _UC_SIGMASK;

	uvm_lwp_setuarea(&lwp0, (vaddr_t) &lwp0pcb);
	memcpy(&lwp0pcb.pcb_userret_ucp, &lwp0pcb.pcb_ucp, sizeof(ucontext_t));

	/* set stack top */
	lwp0pcb.sys_stack_top = lwp0pcb.sys_stack + TRAPSTACKSIZE;
}

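/*
 * Pick the boot device: honour an explicitly requested root device
 * (usermode_root_device) if one was set, otherwise fall back to ld0 and
 * then md0, and hand the result to the MI rootconf() code.
 */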
void
cpu_rootconf(void)
{
	extern char *usermode_root_device;
	device_t rdev;

	if (usermode_root_device != NULL) {
		rdev = device_find_by_xname(usermode_root_device);
	} else {
		rdev = device_find_by_xname("ld0");
		if (rdev == NULL)
			rdev = device_find_by_xname("md0");
	}

	aprint_normal("boot device: %s\n",
	    rdev ? device_xname(rdev) : "<unknown>");
	booted_device = rdev;
	rootconf();
}

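/*
 * Report whether we are currently running in interrupt context:
 * ci_idepth is initialised to -1 above and appears to be non-negative
 * only while an interrupt is being handled.
 */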
bool
cpu_intr_p(void)
{
	int idepth;

	kpreempt_disable();
	idepth = curcpu()->ci_idepth;
	kpreempt_enable();

	return (idepth >= 0);
}