xref: /netbsd-src/sys/rump/librump/rumpkern/emul.c (revision f75f5aae154fcd0572e8889e4fea2a51d67bbf08)
1 /*	$NetBSD: emul.c,v 1.98 2009/10/09 14:41:36 pooka Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
5  *
6  * Development of this software was supported by Google Summer of Code.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
18  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.98 2009/10/09 14:41:36 pooka Exp $");
32 
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/null.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/select.h>
#include <sys/syslog.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/timetc.h>
#include <sys/tprintf.h>
#include <sys/module.h>
#include <sys/tty.h>
#include <sys/reboot.h>
56 
57 #include <dev/cons.h>
58 
59 #include <machine/stdarg.h>
60 
61 #include <rump/rumpuser.h>
62 
63 #include <uvm/uvm_map.h>
64 
65 #include "rump_private.h"
66 
/*
 * Kernel globals normally provided elsewhere in a full kernel.
 * They exist here mainly to satisfy link-time references from kernel
 * code compiled into librump; most are never modified by this file.
 */
time_t time_second = 1;

kmutex_t *proc_lock;
struct lwp lwp0;			/* primordial lwp */
struct vnode *rootvp;
struct device *root_device;
dev_t rootdev;
int physmem = 256*256; /* 256 * 1024*1024 / 4k, PAGE_SIZE not always set */
int doing_shutdown;
int ncpu = 1;				/* rump presents a single cpu */
const int schedppq = 1;
int hardclock_ticks;
bool mp_online = false;
struct vm_map *mb_map;
struct timeval boottime;
struct emul emul_netbsd;
int cold = 1;
int boothowto = AB_SILENT;
struct tty *constty;

char hostname[MAXHOSTNAMELEN];
size_t hostnamelen;

const char *panicstr;
const char ostype[] = "NetBSD";
const char osrelease[] = "999"; /* paradroid 4evah */
const char kernel_ident[] = "RUMP-ROAST";
const char *domainname;
int domainnamelen;

/* zero-filled stubs; rump does not deliver knotes through these */
const struct filterops seltrue_filtops;
const struct filterops sig_filtops;
99 
100 #define DEVSW_SIZE 255
101 const struct bdevsw *bdevsw0[DEVSW_SIZE]; /* XXX storage size */
102 const struct bdevsw **bdevsw = bdevsw0;
103 const int sys_cdevsws = DEVSW_SIZE;
104 int max_cdevsws = DEVSW_SIZE;
105 
106 const struct cdevsw *cdevsw0[DEVSW_SIZE]; /* XXX storage size */
107 const struct cdevsw **cdevsw = cdevsw0;
108 const int sys_bdevsws = DEVSW_SIZE;
109 int max_bdevsws = DEVSW_SIZE;
110 
111 struct devsw_conv devsw_conv0;
112 struct devsw_conv *devsw_conv = &devsw_conv0;
113 int max_devsw_convs = 0;
114 int mem_no = 2;
115 
/* autoconf boot-device bookkeeping; never set by rump itself */
struct device *booted_device;
struct device *booted_wedge;
int booted_partition;

/* global tty lock, normally initialized by the tty subsystem */
kmutex_t tty_lock;
121 
122 int
123 copyin(const void *uaddr, void *kaddr, size_t len)
124 {
125 
126 	if (curproc->p_vmspace == &rump_vmspace)
127 		memcpy(kaddr, uaddr, len);
128 	else
129 		rump_sysproxy_copyin(uaddr, kaddr, len);
130 	return 0;
131 }
132 
133 int
134 copyout(const void *kaddr, void *uaddr, size_t len)
135 {
136 
137 	if (curproc->p_vmspace == &rump_vmspace)
138 		memcpy(uaddr, kaddr, len);
139 	else
140 		rump_sysproxy_copyout(kaddr, uaddr, len);
141 	return 0;
142 }
143 
/*
 * copystr: kernel-to-kernel string copy.  In rump all address spaces
 * are handled identically, so simply delegate to copyinstr().
 */
int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{

	return copyinstr(kfaddr, kdaddr, len, done);
}
150 
151 int
152 copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
153 {
154 
155 	if (curproc->p_vmspace == &rump_vmspace)
156 		strlcpy(kaddr, uaddr, len);
157 	else
158 		rump_sysproxy_copyin(uaddr, kaddr, len);
159 	if (done)
160 		*done = strlen(kaddr)+1; /* includes termination */
161 	return 0;
162 }
163 
164 int
165 copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done)
166 {
167 
168 	if (curproc->p_vmspace == &rump_vmspace)
169 		strlcpy(uaddr, kaddr, len);
170 	else
171 		rump_sysproxy_copyout(kaddr, uaddr, len);
172 	if (done)
173 		*done = strlen(uaddr)+1; /* includes termination */
174 	return 0;
175 }
176 
/*
 * copyin_vmspace: vmspace-qualified copyin.  The vmspace argument is
 * ignored here since rump handles all spaces through copyin() itself.
 */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{

	return copyin(uaddr, kaddr, len);
}

/*
 * copyout_vmspace: vmspace-qualified copyout; vm is likewise ignored.
 */
int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{

	return copyout(kaddr, uaddr, len);
}
190 
/*
 * kcopy: kernel-to-kernel copy which may not fault.  In rump this can
 * never fault, so it is a plain memcpy that always succeeds.
 */
int
kcopy(const void *src, void *dst, size_t len)
{

	memcpy(dst, src, len);

	return 0;
}
198 
199 int
200 uiomove(void *buf, size_t n, struct uio *uio)
201 {
202 	struct iovec *iov;
203 	uint8_t *b = buf;
204 	size_t cnt;
205 
206 	if (uio->uio_vmspace != UIO_VMSPACE_SYS)
207 		panic("%s: vmspace != UIO_VMSPACE_SYS", __func__);
208 
209 	while (n && uio->uio_resid) {
210 		iov = uio->uio_iov;
211 		cnt = iov->iov_len;
212 		if (cnt == 0) {
213 			uio->uio_iov++;
214 			uio->uio_iovcnt--;
215 			continue;
216 		}
217 		if (cnt > n)
218 			cnt = n;
219 
220 		if (uio->uio_rw == UIO_READ)
221 			memcpy(iov->iov_base, b, cnt);
222 		else
223 			memcpy(b, iov->iov_base, cnt);
224 
225 		iov->iov_base = (uint8_t *)iov->iov_base + cnt;
226 		iov->iov_len -= cnt;
227 		b += cnt;
228 		uio->uio_resid -= cnt;
229 		uio->uio_offset += cnt;
230 		n -= cnt;
231 	}
232 
233 	return 0;
234 }
235 
/*
 * uio_setup_sysspace: mark a uio as describing a kernel-space transfer.
 */
void
uio_setup_sysspace(struct uio *uio)
{

	uio->uio_vmspace = UIO_VMSPACE_SYS;
}
242 
/*
 * device_class: return the class of a device.  Only the (faked) root
 * device is supported, and it is always reported as a disk.
 */
devclass_t
device_class(device_t dev)
{

	if (dev != root_device)
		panic("%s: dev != root_device not supported", __func__);

	return DV_DISK;
}
252 
/*
 * getnanouptime: time since rump instance start, via the hypercall layer.
 */
void
getnanouptime(struct timespec *ts)
{

	rump_getuptime(ts);
}

/*
 * getmicrouptime: microsecond-resolution variant of getnanouptime().
 */
void
getmicrouptime(struct timeval *tv)
{
	struct timespec ts;

	getnanouptime(&ts);
	TIMESPEC_TO_TIMEVAL(tv, &ts);
}
268 
/*
 * malloc_type_attach/detach: malloc statistics bookkeeping.
 * rump does not track malloc types, so these are no-ops.
 */
void
malloc_type_attach(struct malloc_type *type)
{
}

void
malloc_type_detach(struct malloc_type *type)
{
}
282 
283 void *
284 kern_malloc(unsigned long size, struct malloc_type *type, int flags)
285 {
286 	void *rv;
287 
288 	rv = rumpuser_malloc(size, (flags & (M_CANFAIL | M_NOWAIT)) != 0);
289 	if (rv && flags & M_ZERO)
290 		memset(rv, 0, size);
291 
292 	return rv;
293 }
294 
/*
 * kern_realloc: kernel realloc(9) front-end; failure is allowed only
 * with M_CANFAIL/M_NOWAIT, matching kern_malloc().
 */
void *
kern_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{

	return rumpuser_realloc(ptr, size, (flags & (M_CANFAIL|M_NOWAIT)) != 0);
}

/*
 * kern_free: release memory obtained from kern_malloc()/kern_realloc().
 */
void
kern_free(void *ptr, struct malloc_type *type)
{

	rumpuser_free(ptr);
}
308 
/*
 * gettime: fetch host wall-clock time directly via a hypercall.
 * NB: the hypercall error is deliberately ignored; the timespec is
 * used whatever the outcome.
 */
static void
gettime(struct timespec *ts)
{
	uint64_t sec, nsec;
	int error;

	rumpuser_gettime(&sec, &nsec, &error);
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
319 
320 void
321 nanotime(struct timespec *ts)
322 {
323 
324 	if (rump_threads) {
325 		rump_gettime(ts);
326 	} else {
327 		gettime(ts);
328 	}
329 }
330 
/* hooray for mick, so what if I do */
/*
 * getnanotime: "cheap" time interface; in rump it is simply nanotime().
 */
void
getnanotime(struct timespec *ts)
{

	nanotime(ts);
}
338 
339 void
340 microtime(struct timeval *tv)
341 {
342 	struct timespec ts;
343 
344 	if (rump_threads) {
345 		rump_gettime(&ts);
346 		TIMESPEC_TO_TIMEVAL(tv, &ts);
347 	} else {
348 		gettime(&ts);
349 		TIMESPEC_TO_TIMEVAL(tv, &ts);
350 	}
351 }
352 
/*
 * getmicrotime: "cheap" microsecond time; simply microtime() in rump.
 */
void
getmicrotime(struct timeval *tv)
{

	microtime(tv);
}
359 
/*
 * kthdesc: handoff descriptor from kthread_create() to threadbouncer().
 * Allocated by the creator, freed by the new thread once consumed.
 */
struct kthdesc {
	void (*f)(void *);	/* thread entry point */
	void *arg;		/* argument for f */
	struct lwp *mylwp;	/* pre-created lwp for the new thread */
};
365 
366 static void *
367 threadbouncer(void *arg)
368 {
369 	struct kthdesc *k = arg;
370 	void (*f)(void *);
371 	void *thrarg;
372 
373 	f = k->f;
374 	thrarg = k->arg;
375 	rumpuser_set_curlwp(k->mylwp);
376 	kmem_free(k, sizeof(struct kthdesc));
377 
378 	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
379 		KERNEL_LOCK(1, NULL);
380 	f(thrarg);
381 	panic("unreachable, should kthread_exit()");
382 }
383 
384 int
385 kthread_create(pri_t pri, int flags, struct cpu_info *ci,
386 	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
387 {
388 	char thrstore[MAXCOMLEN];
389 	const char *thrname = NULL;
390 	va_list ap;
391 	struct kthdesc *k;
392 	struct lwp *l;
393 	int rv;
394 
395 	thrstore[0] = '\0';
396 	if (fmt) {
397 		va_start(ap, fmt);
398 		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
399 		va_end(ap);
400 		thrname = thrstore;
401 	}
402 
403 	/*
404 	 * We don't want a module unload thread.
405 	 * (XXX: yes, this is a kludge too, and the kernel should
406 	 * have a more flexible method for configuring which threads
407 	 * we want).
408 	 */
409 	if (strcmp(thrstore, "modunload") == 0) {
410 		return 0;
411 	}
412 
413 	if (!rump_threads) {
414 		/* fake them */
415 		if (strcmp(thrstore, "vrele") == 0) {
416 			printf("rump warning: threads not enabled, not starting"
417 			   " vrele thread\n");
418 			return 0;
419 		} else if (strcmp(thrstore, "cachegc") == 0) {
420 			printf("rump warning: threads not enabled, not starting"
421 			   " namecache g/c thread\n");
422 			return 0;
423 		} else if (strcmp(thrstore, "nfssilly") == 0) {
424 			printf("rump warning: threads not enabled, not enabling"
425 			   " nfs silly rename\n");
426 			return 0;
427 		} else if (strcmp(thrstore, "unpgc") == 0) {
428 			printf("rump warning: threads not enabled, not enabling"
429 			   " UNP garbage collection\n");
430 			return 0;
431 		} else
432 			panic("threads not available, setenv RUMP_THREADS 1");
433 	}
434 
435 	KASSERT(fmt != NULL);
436 	if (ci != NULL)
437 		panic("%s: bounded threads not supported", __func__);
438 
439 	k = kmem_alloc(sizeof(struct kthdesc), KM_SLEEP);
440 	k->f = func;
441 	k->arg = arg;
442 	k->mylwp = l = rump_setup_curlwp(0, rump_nextlid(), 0);
443 	if (flags & KTHREAD_MPSAFE)
444 		l->l_pflag |= LP_MPSAFE;
445 	rv = rumpuser_thread_create(threadbouncer, k, thrname);
446 	if (rv)
447 		return rv;
448 
449 	if (newlp)
450 		*newlp = l;
451 	return 0;
452 }
453 
/*
 * kthread_exit: terminate the calling kernel thread.  Drops the big
 * lock if held, detaches the lwp and exits the host thread.
 * Does not return.
 */
void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_ONE(NULL);
	rump_clear_curlwp();
	rumpuser_thread_exit();
}
463 
/*
 * p_find / pg_find: process and process-group lookup.
 * Not needed by rump so far; unimplemented stubs.
 */
struct proc *
p_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}

struct pgrp *
pg_find(pid_t pid, uint flags)
{

	panic("%s: not implemented", __func__);
}
477 
/*
 * psignal: post a signal to a process.  The only signal rump expects
 * to generate is SIGSYS (unimplemented syscall), which is silently
 * dropped; anything else indicates a bug.
 */
void
psignal(struct proc *p, int signo)
{

	if (signo != SIGSYS)
		panic("unhandled signal %d", signo);
}
489 
/*
 * Richer signal delivery and session queries: unimplemented stubs.
 */
void
kpsignal(struct proc *p, ksiginfo_t *ksi, void *data)
{

	panic("%s: not implemented", __func__);
}

void
kpgsignal(struct pgrp *pgrp, ksiginfo_t *ksi, void *data, int checkctty)
{

	panic("%s: not implemented", __func__);
}

int
pgid_in_session(struct proc *p, pid_t pg_id)
{

	panic("%s: not implemented", __func__);
}
510 
/*
 * sigispending: rump never has pending signals, so always report none.
 */
int
sigispending(struct lwp *l, int signo)
{

	return 0;
}

/*
 * sigpending1: fetch the pending signal set; unimplemented stub.
 */
void
sigpending1(struct lwp *l, sigset_t *ss)
{

	panic("%s: not implemented", __func__);
}
524 
525 int
526 kpause(const char *wmesg, bool intr, int timeo, kmutex_t *mtx)
527 {
528 	extern int hz;
529 	int rv, error;
530 	uint64_t sec, nsec;
531 
532 	if (mtx)
533 		mutex_exit(mtx);
534 
535 	sec = timeo / hz;
536 	nsec = (timeo % hz) * (1000000000 / hz);
537 	rv = rumpuser_nanosleep(&sec, &nsec, &error);
538 
539 	if (mtx)
540 		mutex_enter(mtx);
541 
542 	if (rv)
543 		return error;
544 
545 	return 0;
546 }
547 
/*
 * suspendsched: halt scheduling of new work, e.g. for shutdown.
 */
void
suspendsched(void)
{

	/* we don't control scheduling currently, can't do anything now */
}

/*
 * lwp_unsleep: wake an lwp via its sync object.  Caller must hold the
 * lwp's mutex; the sobj_unsleep callback releases it.
 */
u_int
lwp_unsleep(lwp_t *l, bool cleanup)
{

	KASSERT(mutex_owned(l->l_mutex));

	return (*l->l_syncobj->sobj_unsleep)(l, cleanup);
}
563 
/*
 * calc_cache_size: bytes of memory to dedicate to the page cache,
 * computed as pct percent of physmem.  The map and va_pct arguments
 * are unused here.  NB: the multiply/divide order below matters for
 * both rounding and overflow; the vaddr_t cast check catches paddr_t
 * values that would not fit a virtual address.
 */
vaddr_t
calc_cache_size(struct vm_map *map, int pct, int va_pct)
{
	paddr_t t;

	t = (paddr_t)physmem * pct / 100 * PAGE_SIZE;
	if ((vaddr_t)t != t) {
		panic("%s: needs tweak", __func__);
	}
	return t;
}
575 
576 int
577 seltrue(dev_t dev, int events, struct lwp *l)
578 {
579         return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
580 }
581 
/*
 * select/poll bookkeeping: rump performs no real event tracking, so
 * these are all no-ops.
 */
void
selrecord(lwp_t *selector, struct selinfo *sip)
{
}

void
selinit(struct selinfo *sip)
{
}

void
selnotify(struct selinfo *sip, int events, long knhint)
{
}

void
seldestroy(struct selinfo *sip)
{
}
601 
/*
 * device_xname: external device name; rump has only one fake device.
 */
const char *
device_xname(device_t dv)
{
	return "bogus0";
}

/*
 * assert_sleepable: verify the caller may sleep.  Rump contexts are
 * currently always sleepable, so nothing is checked.
 */
void
assert_sleepable(void)
{

	/* always sleepable, although we should improve this */
}

/*
 * tc_setclock: step the system clock; unimplemented stub.
 */
void
tc_setclock(const struct timespec *ts)
{

	panic("%s: not implemented", __func__);
}
621 
/*
 * Process credential management: unimplemented stubs.
 */
int
proc_uidmatch(kauth_cred_t cred, kauth_cred_t target)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_enter(void)
{

	panic("%s: not implemented", __func__);
}

void
proc_crmod_leave(kauth_cred_t c1, kauth_cred_t c2, bool sugid)
{

	panic("%s: not implemented", __func__);
}
642 
/*
 * module_init_md: machine-dependent module initialization; no-op here.
 */
void
module_init_md(void)
{

	/*
	 * Nothing for now.  However, we should load the librump
	 * symbol table.
	 */
}
652 
/* us and them, after all we're only ordinary seconds */
/*
 * rump_delay: busy-wait substitute implemented with a hypervisor
 * nanosleep.  Delays of a second or more suggest a caller bug, hence
 * the warning.  The hypercall error is ignored.
 */
static void
rump_delay(unsigned int us)
{
	uint64_t sec = us / 1000000;
	uint64_t nsec = (us % 1000000) * 1000;
	int error;

	if (__predict_false(sec != 0))
		printf("WARNING: over 1s delay\n");

	rumpuser_nanosleep(&sec, &nsec, &error);
}
void (*delay_func)(unsigned int) = rump_delay;
669 
/*
 * kpreempt_disable: rump threads are never preempted in-kernel, so
 * only the bookkeeping macro runs.
 */
void
kpreempt_disable(void)
{

	/* XXX: see below */
	KPREEMPT_DISABLE(curlwp);
}

/*
 * kpreempt_enable: deliberately panics -- preemption control is not
 * supported and kpreempt_disable() should only be reached via panic().
 */
void
kpreempt_enable(void)
{

	/* try to make sure kpreempt_disable() is only used from panic() */
	panic("kpreempt not supported");
}
685 
/*
 * Session reference counting: sessions do not exist in rump, so
 * reaching these is a bug.
 */
void
proc_sesshold(struct session *ss)
{

	panic("proc_sesshold() impossible, session %p", ss);
}

void
proc_sessrele(struct session *ss)
{

	panic("proc_sessrele() impossible, session %p", ss);
}
699 
/*
 * ttycheckoutq: report whether the tty output queue has room.
 * Output always "fits" in rump.
 */
int
ttycheckoutq(struct tty *tp, int wait)
{

	return 1;
}

/*
 * cnputc: console output, one character via hypercall.  The hypercall
 * error is ignored.
 */
void
cnputc(int c)
{
	int error;

	rumpuser_putchar(c, &error);
}

/*
 * cnflush: flush console output; rumpuser_putchar is unbuffered, so
 * there is nothing to do.
 */
void
cnflush(void)
{

	/* done */
}

/*
 * tputchar: put a character on a tty; routed to the console, flags
 * and tp are ignored.
 */
int
tputchar(int c, int flags, struct tty *tp)
{

	cnputc(c);
	return 0;
}
729 
/*
 * cpu_reboot: shut down the rump kernel and exit the host process.
 * Declared __dead, so it must not return.
 */
void
cpu_reboot(int howto, char *bootstr)
{

	rump_reboot(howto);

	/* this function is __dead, we must exit */
	rumpuser_exit(0);
}
739 
740 /*
741  * XXX: from sys_select.c, see that file for license.
742  * (these will go away really soon in favour of the real sys_select.c)
743  * ((really, the select code just needs cleanup))
744  * (((seriously)))
745  */
/*
 * inittimeleft: validate a timeout and record the monotonic start time
 * in sleepts for later use by gettimeleft().  Returns -1 on an invalid
 * timespec.
 */
int
inittimeleft(struct timespec *ts, struct timespec *sleepts)
{
	if (itimespecfix(ts))
		return -1;
	getnanouptime(sleepts);
	return 0;
}
754 
/*
 * gettimeleft: recompute the remaining timeout after a wakeup.
 * ts is reduced by the time elapsed since *sleepts (as set by
 * inittimeleft() or the previous call), *sleepts is advanced to "now",
 * and the remainder is returned in ticks.
 */
int
gettimeleft(struct timespec *ts, struct timespec *sleepts)
{
	/*
	 * We have to recalculate the timeout on every retry.
	 */
	struct timespec sleptts;
	/*
	 * reduce ts by elapsed time
	 * based on monotonic time scale
	 */
	getnanouptime(&sleptts);
	timespecadd(ts, sleepts, ts);
	timespecsub(ts, &sleptts, ts);
	*sleepts = sleptts;
	return tstohz(ts);
}
772 
/*
 * pmf_device_register1: power-management registration.  Rump does no
 * power management; claim success and ignore the callbacks.
 */
bool
pmf_device_register1(struct device *dev,
	bool (*suspend)(device_t PMF_FN_PROTO),
	bool (*resume)(device_t PMF_FN_PROTO),
	bool (*shutdown)(device_t, int))
{

	return true;
}

/*
 * pmf_device_deregister: counterpart of the above; nothing to undo.
 */
void
pmf_device_deregister(struct device *dev)
{

	/* nada */
}
789