1 /*	$NetBSD: emul.c,v 1.79 2009/02/27 15:15:19 pooka Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
5  *
6  * Development of this software was supported by Google Summer of Code.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.79 2009/02/27 15:15:19 pooka Exp $");
32 
33 #include <sys/param.h>
34 #include <sys/malloc.h>
35 #include <sys/null.h>
36 #include <sys/vnode.h>
37 #include <sys/stat.h>
38 #include <sys/select.h>
39 #include <sys/syslog.h>
40 #include <sys/namei.h>
41 #include <sys/kauth.h>
42 #include <sys/conf.h>
43 #include <sys/device.h>
44 #include <sys/queue.h>
45 #include <sys/file.h>
46 #include <sys/filedesc.h>
47 #include <sys/kthread.h>
48 #include <sys/cpu.h>
49 #include <sys/kmem.h>
50 #include <sys/poll.h>
51 #include <sys/timetc.h>
52 #include <sys/tprintf.h>
53 #include <sys/module.h>
54 #include <sys/tty.h>
55 #include <sys/reboot.h>
56 
57 #include <dev/cons.h>
58 
59 #include <machine/stdarg.h>
60 
61 #include <rump/rumpuser.h>
62 
63 #include <uvm/uvm_map.h>
64 
65 #include "rump_private.h"
66 
67 time_t time_second = 1;
68 
69 kmutex_t *proc_lock;
70 struct lwp lwp0;
71 struct vnode *rootvp;
72 struct device *root_device;
73 dev_t rootdev;
74 int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
75 int doing_shutdown;
76 int ncpu = 1;
77 const int schedppq = 1;
78 int hardclock_ticks;
79 bool mp_online = false;
80 struct vm_map *mb_map;
81 struct timeval boottime;
82 struct emul emul_netbsd;
83 int cold = 1;
84 int boothowto;
85 struct tty *constty;
86 
87 char hostname[MAXHOSTNAMELEN];
88 size_t hostnamelen;
89 
90 u_long	bufmem_valimit;
91 u_long	bufmem_hiwater;
92 u_long	bufmem_lowater;
93 u_long	bufmem;
94 u_int	nbuf;
95 
96 const char *panicstr;
97 const char ostype[] = "NetBSD";
98 const char osrelease[] = "999"; /* paradroid 4evah */
99 const char kernel_ident[] = "RUMP-ROAST";
100 const char *domainname;
101 int domainnamelen;
102 
103 const struct filterops seltrue_filtops;
104 
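/*
 * Statically sized block/character device switch tables.  DEVSW_SIZE is
 * an arbitrary compile-time guess at how many slots will be needed (see
 * the XXX notes below).
 */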
105 #define DEVSW_SIZE 255
106 const struct bdevsw *bdevsw0[DEVSW_SIZE]; /* XXX storage size */
107 const struct bdevsw **bdevsw = bdevsw0;
108 const int sys_bdevsws = DEVSW_SIZE;
109 int max_bdevsws = DEVSW_SIZE;
110 
111 const struct cdevsw *cdevsw0[DEVSW_SIZE]; /* XXX storage size */
112 const struct cdevsw **cdevsw = cdevsw0;
113 const int sys_cdevsws = DEVSW_SIZE;
114 int max_cdevsws = DEVSW_SIZE;
115 
116 struct devsw_conv devsw_conv0;
117 struct devsw_conv *devsw_conv = &devsw_conv0;
118 int max_devsw_convs = 0;
119 int mem_no = 2;
120 
121 kmutex_t tty_lock;
122 
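/*
 * A rump kernel runs entirely inside a single host process, so there is
 * no separate user address space to cross: the copyin/copyout family
 * below degenerates into plain memory copies which cannot fault.
 */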
123 int
124 copyin(const void *uaddr, void *kaddr, size_t len)
125 {
126 
127 	memcpy(kaddr, uaddr, len);
128 	return 0;
129 }
130 
131 int
132 copyout(const void *kaddr, void *uaddr, size_t len)
133 {
134 
135 	memcpy(uaddr, kaddr, len);
136 	return 0;
137 }
138 
139 int
140 copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
141 {
142 
143 	return copyinstr(kfaddr, kdaddr, len, done);
144 }
145 
146 int
147 copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
148 {
149 
	if (strlcpy(kaddr, uaddr, len) >= len)
		return ENAMETOOLONG; /* did not fit, including termination */
	if (done)
		*done = strlen(kaddr)+1; /* includes termination */
	return 0;
154 }
155 
156 int
157 copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
158 {
159 
160 	return copyin(uaddr, kaddr, len);
161 }
162 
163 int
164 copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
165 {
166 
167 	return copyout(kaddr, uaddr, len);
168 }
169 
170 int
171 kcopy(const void *src, void *dst, size_t len)
172 {
173 
174 	memcpy(dst, src, len);
175 	return 0;
176 }
177 
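/*
 * Minimal uiomove: only UIO_VMSPACE_SYS transfers are handled.  UIO_READ
 * moves data from the kernel buffer into the iovecs; anything else moves
 * it in the opposite direction.
 */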
178 int
179 uiomove(void *buf, size_t n, struct uio *uio)
180 {
181 	struct iovec *iov;
182 	uint8_t *b = buf;
183 	size_t cnt;
184 
185 	if (uio->uio_vmspace != UIO_VMSPACE_SYS)
186 		panic("%s: vmspace != UIO_VMSPACE_SYS", __func__);
187 
188 	while (n && uio->uio_resid) {
189 		iov = uio->uio_iov;
190 		cnt = iov->iov_len;
191 		if (cnt == 0) {
192 			uio->uio_iov++;
193 			uio->uio_iovcnt--;
194 			continue;
195 		}
196 		if (cnt > n)
197 			cnt = n;
198 
199 		if (uio->uio_rw == UIO_READ)
200 			memcpy(iov->iov_base, b, cnt);
201 		else
202 			memcpy(b, iov->iov_base, cnt);
203 
204 		iov->iov_base = (uint8_t *)iov->iov_base + cnt;
205 		iov->iov_len -= cnt;
206 		b += cnt;
207 		uio->uio_resid -= cnt;
208 		uio->uio_offset += cnt;
209 		n -= cnt;
210 	}
211 
212 	return 0;
213 }
214 
215 void
216 uio_setup_sysspace(struct uio *uio)
217 {
218 
219 	uio->uio_vmspace = UIO_VMSPACE_SYS;
220 }
221 
222 devclass_t
223 device_class(device_t dev)
224 {
225 
226 	if (dev != root_device)
227 		panic("%s: dev != root_device not supported", __func__);
228 
229 	return DV_DISK;
230 }
231 
232 void
233 getmicrouptime(struct timeval *tvp)
234 {
235 	uint64_t sec, nsec;
236 	int error;
237 
238 	/* XXX: this is wrong, does not report *uptime* */
239 	rumpuser_gettime(&sec, &nsec, &error);
240 	tvp->tv_sec = sec;
241 	tvp->tv_usec = nsec / 1000;
242 }
243 
244 void
245 malloc_type_attach(struct malloc_type *type)
246 {
247 
248 	return;
249 }
250 
251 void
252 malloc_type_detach(struct malloc_type *type)
253 {
254 
255 	return;
256 }
257 
258 void *
259 kern_malloc(unsigned long size, struct malloc_type *type, int flags)
260 {
261 	void *rv;
262 
263 	rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
264 	if (rv && flags & M_ZERO)
265 		memset(rv, 0, size);
266 
267 	return rv;
268 }
269 
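/*
 * XXX: this does not preserve the old contents -- the size of the
 * original allocation is not known here, so the caller simply gets a
 * fresh buffer.
 */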
270 void *
271 kern_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
272 {
273 
274 	return rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
275 }
276 
277 void
278 kern_free(void *ptr, struct malloc_type *type)
279 {
280 
281 	rumpuser_free(ptr);
282 }
283 
284 static void
285 gettime(struct timespec *ts)
286 {
287 	uint64_t sec, nsec;
288 	int error;
289 
290 	rumpuser_gettime(&sec, &nsec, &error);
291 	ts->tv_sec = sec;
292 	ts->tv_nsec = nsec;
293 }
294 
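/*
 * When rump threads are enabled, time is served by the rump clock code
 * via rump_gettime(); otherwise the host's clock is read directly with
 * rumpuser_gettime().
 */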
295 void
296 nanotime(struct timespec *ts)
297 {
298 
299 	if (rump_threads) {
300 		rump_gettime(ts);
301 	} else {
302 		gettime(ts);
303 	}
304 }
305 
306 /* hooray for mick, so what if I do */
307 void
308 getnanotime(struct timespec *ts)
309 {
310 
311 	nanotime(ts);
312 }
313 
314 void
315 microtime(struct timeval *tv)
316 {
317 	struct timespec ts;
318 
319 	if (rump_threads) {
320 		rump_gettime(&ts);
321 		TIMESPEC_TO_TIMEVAL(tv, &ts);
322 	} else {
323 		gettime(&ts);
324 		TIMESPEC_TO_TIMEVAL(tv, &ts);
325 	}
326 }
327 
328 void
329 getmicrotime(struct timeval *tv)
330 {
331 
332 	microtime(tv);
333 }
334 
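/*
 * Kernel threads are backed by host threads: kthread_create() packs the
 * function and its argument into a kthdesc and hands it to
 * rumpuser_thread_create().  threadbouncer() then unpacks it in the new
 * host thread, sets curlwp and takes the big kernel lock unless the
 * thread was created MPSAFE.
 */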
335 struct kthdesc {
336 	void (*f)(void *);
337 	void *arg;
338 	struct lwp *mylwp;
339 };
340 
341 static void *
342 threadbouncer(void *arg)
343 {
344 	struct kthdesc *k = arg;
345 	void (*f)(void *);
346 	void *thrarg;
347 
348 	f = k->f;
349 	thrarg = k->arg;
350 	rumpuser_set_curlwp(k->mylwp);
351 	kmem_free(k, sizeof(struct kthdesc));
352 
353 	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
354 		KERNEL_LOCK(1, NULL);
355 	f(thrarg);
356 	panic("unreachable, should kthread_exit()");
357 }
358 
359 int
360 kthread_create(pri_t pri, int flags, struct cpu_info *ci,
361 	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
362 {
363 	char thrstore[MAXCOMLEN];
364 	const char *thrname = NULL;
365 	va_list ap;
366 	struct kthdesc *k;
367 	struct lwp *l;
368 	int rv;
369 
370 	thrstore[0] = '\0';
371 	if (fmt) {
372 		va_start(ap, fmt);
373 		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
374 		va_end(ap);
375 		thrname = thrstore;
376 	}
377 
378 	/*
379 	 * We don't want a module unload thread.
380 	 * (XXX: yes, this is a kludge too, and the kernel should
381 	 * have a more flexible method for configuring which threads
382 	 * we want).
383 	 */
384 	if (strcmp(thrstore, "modunload") == 0) {
385 		return 0;
386 	}
387 
388 	if (!rump_threads) {
389 		/* fake them */
390 		if (strcmp(thrstore, "vrele") == 0) {
391 			printf("rump warning: threads not enabled, not starting"
392 			   " vrele thread\n");
393 			return 0;
394 		} else if (strcmp(thrstore, "cachegc") == 0) {
395 			printf("rump warning: threads not enabled, not starting"
396 			   " namecache g/c thread\n");
397 			return 0;
398 		} else if (strcmp(thrstore, "nfssilly") == 0) {
399 			printf("rump warning: threads not enabled, not enabling"
400 			   " nfs silly rename\n");
401 			return 0;
402 		} else
403 			panic("threads not available, setenv RUMP_THREADS 1");
404 	}
405 
406 	KASSERT(fmt != NULL);
407 	if (ci != NULL)
408 		panic("%s: bounded threads not supported", __func__);
409 
410 	k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
411 	k->f = func;
412 	k->arg = arg;
413 	k->mylwp = l = rump_setup_curlwp(0, rump_nextlid(), 0);
414 	if (flags & KTHREAD_MPSAFE)
415 		l->l_pflag |= LP_MPSAFE;
416 	rv = rumpuser_thread_create(threadbouncer, k, thrname);
417 	if (rv)
418 		return rv;
419 
420 	if (newlp)
421 		*newlp = l;
422 	return 0;
423 }
424 
425 void
426 kthread_exit(int ecode)
427 {
428 
429 	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
430 		KERNEL_UNLOCK_ONE(NULL);
431 	rump_clear_curlwp();
432 	rumpuser_thread_exit();
433 }
434 
435 struct proc *
436 p_find(pid_t pid, uint flags)
437 {
438 
439 	panic("%s: not implemented", __func__);
440 }
441 
442 struct pgrp *
443 pg_find(pid_t pid, uint flags)
444 {
445 
446 	panic("%s: not implemented", __func__);
447 }
448 
449 void
450 psignal(struct proc *p, int signo)
451 {
452 
453 	switch (signo) {
454 	case SIGSYS:
455 		break;
456 	default:
457 		panic("unhandled signal %d", signo);
458 	}
459 }
460 
461 void
462 kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
463 {
464 
465 	panic("%s: not implemented", __func__);
466 }
467 
468 void
469 kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
470 {
471 
472 	panic("%s: not implemented", __func__);
473 }
474 
475 int
476 pgid_in_session(struct proc *p, pid_t pg_id)
477 {
478 
479 	panic("%s: not implemented", __func__);
480 }
481 
482 int
483 sigispending(struct lwp *l, int signo)
484 {
485 
486 	return 0;
487 }
488 
489 void
490 sigpending1(struct lwp *l, sigset_t *ss)
491 {
492 
493 	panic("%s: not implemented", __func__);
494 }
495 
496 void
497 knote_fdclose(int fd)
498 {
499 
500 	/* since we don't add knotes, we don't have to remove them */
501 }
502 
503 int
504 seltrue_kqfilter(dev_t dev, struct knote *kn)
505 {
506 
507 	panic("%s: not implemented", __func__);
508 }
509 
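/*
 * kpause() converts the tick count into seconds/nanoseconds using hz and
 * sleeps in the host via rumpuser_nanosleep(), dropping and reacquiring
 * the interlock (if any) around the sleep.
 */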
510 int
511 kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
512 {
513 	extern int hz;
514 	int rv, error;
515 	uint64_t sec, nsec;
516 
517 	if (mtx)
518 		mutex_exit(mtx);
519 
520 	sec = timeo / hz;
521 	nsec = (timeo % hz) * (1000000000 / hz);
522 	rv = rumpuser_nanosleep(&sec, &nsec, &error);
523 
524 	if (mtx)
525 		mutex_enter(mtx);
526 
527 	if (rv)
528 		return error;
529 
530 	return 0;
531 }
532 
533 void
534 suspendsched(void)
535 {
536 
537 	panic("%s: not implemented", __func__);
538 }
539 
540 u_int
541 lwp_unsleep(lwp_t *l, bool cleanup)
542 {
543 
544 	KASSERT(mutex_owned(l->l_mutex));
545 
546 	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
547 }
548 
549 vaddr_t
550 calc_cache_size(struct vm_map *map, int pct, int va_pct)
551 {
552 	paddr_t t;
553 
554 	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
555 	if ((vaddr_t)t != t) {
556 		panic("%s: needs tweak", __func__);
557 	}
558 	return t;
559 }
560 
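/*
 * select/poll support is stubbed out: seltrue() reports a descriptor as
 * always ready for the normal read/write events, and the selinfo
 * routines are no-ops since nothing ever sleeps in select here.
 */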
561 int
562 seltrue(dev_t dev, int events, struct lwp *l)
563 {
564 	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
565 }
566 
567 void
568 selrecord(lwp_t *selector, struct selinfo *sip)
569 {
570 }
571 
572 void
573 selinit(struct selinfo *sip)
574 {
575 }
576 
577 void
578 selnotify(struct selinfo *sip, int events, long knhint)
579 {
580 }
581 
582 void
583 seldestroy(struct selinfo *sip)
584 {
585 }
586 
587 const char *
588 device_xname(device_t dv)
589 {
590 	return "bogus0";
591 }
592 
593 void
594 assert_sleepable(void)
595 {
596 
597 	/* always sleepable, although we should improve this */
598 }
599 
600 void
601 tc_setclock(const struct timespec *ts)
602 {
603 
604 	panic("%s: not implemented", __func__);
605 }
606 
607 void
608 proc_crmod_enter(void)
609 {
610 
611 	panic("%s: not implemented", __func__);
612 }
613 
614 void
615 proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
616 {
617 
618 	panic("%s: not implemented", __func__);
619 }
620 
621 void
622 module_init_md(void)
623 {
624 
625 	/*
626 	 * Nothing for now.  However, we should load the librump
627 	 * symbol table.
628 	 */
629 }
630 
631 /* us and them, after all we're only ordinary seconds */
632 static void
633 rump_delay(unsigned int us)
634 {
635 	uint64_t sec, nsec;
636 	int error;
637 
638 	sec = us / 1000000;
639 	nsec = (us % 1000000) * 1000;
640 
641 	if (__predict_false(sec != 0))
642 		printf("WARNING: over 1s delay\n");
643 
644 	rumpuser_nanosleep(&sec, &nsec, &error);
645 }
646 void (*delay_func)(unsigned int) = rump_delay;
647 
648 void
649 kpreempt_disable(void)
650 {
651 
652 	/* XXX: see below */
653 	KPREEMPT_DISABLE(curlwp);
654 }
655 
656 void
657 kpreempt_enable(void)
658 {
659 
660 	/* try to make sure kpreempt_disable() is only used from panic() */
661 	panic("kpreempt not supported");
662 }
663 
664 void
665 sessdelete(struct session *ss)
666 {
667 
668 	panic("sessdelete() impossible, session %p", ss);
669 }
670 
671 int
672 ttycheckoutq(struct tty *tp, int wait)
673 {
674 
675 	return 1;
676 }
677 
678 void
679 cnputc(int c)
680 {
681 	int error;
682 
683 	rumpuser_putchar(c, &error);
684 }
685 
686 void
687 cnflush(void)
688 {
689 
690 	/* done */
691 }
692 
693 int
694 tputchar(int c, int flags, struct tty *tp)
695 {
696 
697 	cnputc(c);
698 	return 0;
699 }
700 
701 void
702 cpu_reboot(int howto, char *bootstr)
703 {
704 
705 	rumpuser_panic();
706 }
707 
708 /* XXX: static, but not used except to make spcopy.S link */
709 #ifdef __hppa__
710 #undef curlwp
711 struct lwp *curlwp = &lwp0;
712 #endif
713