xref: /dflybsd-src/sys/vm/vm_vmspace.c (revision dae741e33c840b92a8a53bf9f01157ede145e256)
/*
 * (MPSAFE)
 *
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/mman.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/vmspace.h>

#include <vm/vm_extern.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>

#include <sys/sysref2.h>
#include <sys/mplock2.h>

static struct vmspace_entry *vkernel_find_vmspace(struct vkernel_proc *vkp,
						  void *id);
static void vmspace_entry_delete(struct vmspace_entry *ve,
				 struct vkernel_proc *vkp);

static MALLOC_DEFINE(M_VKERNEL, "vkernel", "VKernel structures");

/*
 * vmspace_create (void *id, int type, void *data)
 *
 * Create a VMSPACE under the control of the caller with the specified id.
 * An id of NULL cannot be used.  The type and data fields must currently
 * be 0.
 *
 * The vmspace starts out completely empty.  Memory may be mapped into the
 * VMSPACE with vmspace_mmap() and MAP_VPAGETABLE section(s) controlled
 * with vmspace_mcontrol().
 *
 * No requirements.
 */
int
sys_vmspace_create(struct vmspace_create_args *uap)
{
	struct vmspace_entry *ve;
	struct vkernel_proc *vkp;
	struct proc *p = curproc;
	int error;

	if (vkernel_enable == 0)
		return (EOPNOTSUPP);

	/*
	 * Create a virtual kernel side-structure for the process if one
	 * does not exist.
	 *
	 * Implement a simple resolution for SMP races.
	 */
	if ((vkp = p->p_vkernel) == NULL) {
		vkp = kmalloc(sizeof(*vkp), M_VKERNEL, M_WAITOK|M_ZERO);
		lwkt_gettoken(&proc_token);
		if (p->p_vkernel == NULL) {
			vkp->refs = 1;
			lwkt_token_init(&vkp->token, "vkernel");
			RB_INIT(&vkp->root);
			p->p_vkernel = vkp;
		} else {
			kfree(vkp, M_VKERNEL);
			vkp = p->p_vkernel;
		}
		lwkt_reltoken(&proc_token);
	}

	get_mplock();

	/*
	 * Create a new VMSPACE, disallow conflicting ids
	 */
	ve = kmalloc(sizeof(struct vmspace_entry), M_VKERNEL, M_WAITOK|M_ZERO);
	ve->vmspace = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	ve->id = uap->id;
	pmap_pinit2(vmspace_pmap(ve->vmspace));

	lwkt_gettoken(&vkp->token);
	if (RB_INSERT(vmspace_rb_tree, &vkp->root, ve)) {
		sysref_put(&ve->vmspace->vm_sysref);
		kfree(ve, M_VKERNEL);
		error = EEXIST;
	} else {
		error = 0;
	}
	lwkt_reltoken(&vkp->token);
	rel_mplock();
	return (error);
}
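
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * virtual kernel might create and later destroy a VMSPACE from userland.
 * The id is an opaque pointer chosen by the caller; "guest" and the error
 * handling below are hypothetical, and <sys/vmspace.h> plus <err.h> are
 * assumed to supply the prototypes.
 *
 *	struct guest_ctx guest;			// hypothetical container
 *
 *	if (vmspace_create(&guest, 0, NULL) < 0)
 *		err(1, "vmspace_create");	// e.g. EEXIST on a dup id
 *	...
 *	if (vmspace_destroy(&guest) < 0)
 *		err(1, "vmspace_destroy");	// EBUSY while still in use
 */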

/*
 * Destroy a VMSPACE given its identifier.
 *
 * No requirements.
 */
int
sys_vmspace_destroy(struct vmspace_destroy_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	get_mplock();
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}
	if (ve->refs) {
		error = EBUSY;
		goto done2;
	}
	vmspace_entry_delete(ve, vkp);
	error = 0;
done2:
	lwkt_reltoken(&vkp->token);
done3:
	rel_mplock();
	return(error);
}

/*
 * vmspace_ctl (void *id, int cmd, struct trapframe *tframe,
 *		struct vextframe *vframe);
 *
 * Transfer control to a VMSPACE.  Control is returned when a page fault,
 * signal, trap, or system call occurs in the target context.  The context
 * is updated as appropriate.
 *
 * No requirements.
 */
int
sys_vmspace_ctl(struct vmspace_ctl_args *uap)
{
	struct vkernel_proc *vkp;
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve;
	struct lwp *lp;
	struct proc *p;
	int framesz;
	int error;

	lp = curthread->td_lwp;
	p = lp->lwp_proc;

	if ((vkp = p->p_vkernel) == NULL)
		return (EINVAL);

	get_mplock();
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done;
	}

	/*
	 * Signal mailbox interlock
	 */
	if (p->p_flag & P_MAILBOX) {
		lwkt_gettoken(&p->p_token);
		p->p_flag &= ~P_MAILBOX;
		lwkt_reltoken(&p->p_token);
		error = EINTR;
		goto done;
	}

	switch(uap->cmd) {
	case VMSPACE_CTL_RUN:
		/*
		 * Save the caller's register context, swap VM spaces, and
		 * install the passed register context.  Return with
		 * EJUSTRETURN so the syscall code doesn't adjust the context.
		 */
		atomic_add_int(&ve->refs, 1);
		framesz = sizeof(struct trapframe);
		if ((vklp = lp->lwp_vkernel) == NULL) {
			vklp = kmalloc(sizeof(*vklp), M_VKERNEL,
				       M_WAITOK|M_ZERO);
			lp->lwp_vkernel = vklp;
		}
		vklp->user_trapframe = uap->tframe;
		vklp->user_vextframe = uap->vframe;
		bcopy(uap->sysmsg_frame, &vklp->save_trapframe, framesz);
		bcopy(&curthread->td_tls, &vklp->save_vextframe.vx_tls,
		      sizeof(vklp->save_vextframe.vx_tls));
		error = copyin(uap->tframe, uap->sysmsg_frame, framesz);
		if (error == 0) {
			error = copyin(&uap->vframe->vx_tls,
				       &curthread->td_tls,
				       sizeof(struct savetls));
		}
		if (error == 0)
			error = cpu_sanitize_frame(uap->sysmsg_frame);
		if (error == 0)
			error = cpu_sanitize_tls(&curthread->td_tls);
		if (error) {
			bcopy(&vklp->save_trapframe, uap->sysmsg_frame,
			      framesz);
			bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
			      sizeof(vklp->save_vextframe.vx_tls));
			set_user_TLS();
			atomic_subtract_int(&ve->refs, 1);
		} else {
			vklp->ve = ve;
			pmap_setlwpvm(lp, ve->vmspace);
			set_user_TLS();
			set_vkernel_fp(uap->sysmsg_frame);
			error = EJUSTRETURN;
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
done:
	lwkt_reltoken(&vkp->token);
	rel_mplock();
	return(error);
}
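
/*
 * Usage sketch (illustrative only): the virtual kernel's per-thread run
 * loop.  VMSPACE_CTL_RUN installs the guest register context and returns
 * to this point, via vkernel_trap(), when the guest faults, traps, or
 * makes a system call; the guest's updated state comes back through tf.
 * "guest", "tf", "vf" and handle_guest_exit() are hypothetical.
 *
 *	struct trapframe tf;		// guest register state
 *	struct vextframe vf;		// guest TLS state
 *
 *	for (;;) {
 *		vmspace_ctl(&guest, VMSPACE_CTL_RUN, &tf, &vf);
 *		handle_guest_exit(&tf);	// dispatch fault/syscall/signal
 *	}
 */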

/*
 * vmspace_mmap(id, addr, len, prot, flags, fd, offset)
 *
 * Map memory within a VMSPACE.  This function is just like a normal mmap()
 * but operates on the vmspace's memory map.  Most callers use this to create
 * a MAP_VPAGETABLE mapping.
 *
 * No requirements.
 */
int
sys_vmspace_mmap(struct vmspace_mmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	/*
	 * We hold the vmspace token to serialize calls to vkernel_find_vmspace.
	 */
	lwkt_gettoken(&vmspace_token);
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}

	/*
	 * NOTE: kern_mmap() can block so we need to temporarily ref ve->refs.
	 */
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) != NULL) {
		atomic_add_int(&ve->refs, 1);
		error = kern_mmap(ve->vmspace, uap->addr, uap->len,
				  uap->prot, uap->flags,
				  uap->fd, uap->offset, &uap->sysmsg_resultp);
		atomic_subtract_int(&ve->refs, 1);
	} else {
		error = ENOENT;
	}
	lwkt_reltoken(&vkp->token);
done3:
	lwkt_reltoken(&vmspace_token);
	return (error);
}
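
/*
 * Usage sketch (illustrative only): backing guest "physical" memory with
 * a MAP_VPAGETABLE mapping, which the vkernel can later point at a guest
 * page table via vmspace_mcontrol().  The flags mirror mmap(2); "guest",
 * "ram_fd" and "ram_bytes" are hypothetical, and the userland stub is
 * assumed to return the mapped address the way mmap() does.
 *
 *	void *base = vmspace_mmap(&guest, NULL, ram_bytes,
 *				  PROT_READ | PROT_WRITE,
 *				  MAP_SHARED | MAP_VPAGETABLE, ram_fd, 0);
 */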

/*
 * vmspace_munmap(id, addr, len)
 *
 * Unmap memory within a VMSPACE.
 *
 * No requirements.
 */
int
sys_vmspace_munmap(struct vmspace_munmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_map_t map;
	int error;

	get_mplock();
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}

	/*
	 * NOTE: kern_munmap() can block so we need to temporarily
	 *	 ref ve->refs.
	 */
	atomic_add_int(&ve->refs, 1);

	/*
	 * Copied from sys_munmap()
	 */
	addr = (vm_offset_t)uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t)round_page(size);
	if (size < uap->len) {		/* wrap */
		error = EINVAL;
		goto done1;
	}
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr) {		/* wrap */
		error = EINVAL;
		goto done1;
	}
	if (size == 0) {
		error = 0;
		goto done1;
	}

	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}
	map = &ve->vmspace->vm_map;
	if (!vm_map_check_protection(map, addr, tmpaddr, VM_PROT_NONE, FALSE)) {
		error = EINVAL;
		goto done1;
	}
	vm_map_remove(map, addr, addr + size);
	error = 0;
done1:
	atomic_subtract_int(&ve->refs, 1);
done2:
	lwkt_reltoken(&vkp->token);
done3:
	rel_mplock();
	return (error);
}
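
/*
 * Usage sketch (illustrative only): tearing down a guest mapping.  As
 * with munmap(2), the kernel page-aligns addr and len (see the rounding
 * logic above), so unmapping part of a page removes the whole page.
 * "guest", "base" and "ram_bytes" are hypothetical.
 *
 *	if (vmspace_munmap(&guest, base, ram_bytes) < 0)
 *		err(1, "vmspace_munmap");
 */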

/*
 * vmspace_pread(id, buf, nbyte, flags, offset)
 *
 * Read data from a vmspace.  The number of bytes read is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes read is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 *
 * (not implemented yet)
 * No requirements.
 */
int
sys_vmspace_pread(struct vmspace_pread_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	get_mplock();
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}
	error = EINVAL;
done2:
	lwkt_reltoken(&vkp->token);
done3:
	rel_mplock();
	return (error);
}

/*
 * vmspace_pwrite(id, buf, nbyte, flags, offset)
 *
 * Write data to a vmspace.  The number of bytes written is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes written is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 *
 * (not implemented yet)
 * No requirements.
 */
int
sys_vmspace_pwrite(struct vmspace_pwrite_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	get_mplock();
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}
	error = EINVAL;
done2:
	lwkt_reltoken(&vkp->token);
done3:
	rel_mplock();
	return (error);
}

/*
 * vmspace_mcontrol(id, addr, len, behav, value)
 *
 * madvise/mcontrol support for a vmspace.
 *
 * No requirements.
 */
int
sys_vmspace_mcontrol(struct vmspace_mcontrol_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t start, end;
	vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
	int error;

	get_mplock();
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}

	/*
	 * NOTE: kern_madvise() can block so we need to temporarily
	 *	 ref ve->refs.
	 */
	atomic_add_int(&ve->refs, 1);

	/*
	 * This code is basically copied from sys_mcontrol()
	 */
	if (uap->behav < 0 || uap->behav > MADV_CONTROL_END) {
		error = EINVAL;
		goto done1;
	}

	if (tmpaddr < (vm_offset_t)uap->addr) {
		error = EINVAL;
		goto done1;
	}
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}
	if (VM_MIN_USER_ADDRESS > 0 &&
	    (vm_offset_t)uap->addr < VM_MIN_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}

	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page(tmpaddr);

	error = vm_map_madvise(&ve->vmspace->vm_map, start, end,
				uap->behav, uap->value);
done1:
	atomic_subtract_int(&ve->refs, 1);
done2:
	lwkt_reltoken(&vkp->token);
done3:
	rel_mplock();
	return (error);
}
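
/*
 * Usage sketch (illustrative only): aiming the virtual MMU at the guest's
 * page-directory root with MADV_SETMAP, the usual companion to a
 * MAP_VPAGETABLE mapping.  "guest", "base", "ram_bytes" and "pte_root"
 * (the root passed through the value argument) are hypothetical.
 *
 *	if (vmspace_mcontrol(&guest, base, ram_bytes,
 *			     MADV_SETMAP, pte_root) < 0)
 *		err(1, "vmspace_mcontrol");
 */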

/*
 * Red black tree functions
 */
static int rb_vmspace_compare(struct vmspace_entry *, struct vmspace_entry *);
RB_GENERATE(vmspace_rb_tree, vmspace_entry, rb_entry, rb_vmspace_compare);

/*
 * a->id is the comparison key; it is the only field that must be
 * initialized in the dummy entry used for lookups.
 *
 * The caller must hold vkp->token.
 */
static int
rb_vmspace_compare(struct vmspace_entry *a, struct vmspace_entry *b)
{
	if ((char *)a->id < (char *)b->id)
		return(-1);
	else if ((char *)a->id > (char *)b->id)
		return(1);
	return(0);
}

/*
 * The caller must hold vkp->token.
 */
static
int
rb_vmspace_delete(struct vmspace_entry *ve, void *data)
{
	struct vkernel_proc *vkp = data;

	KKASSERT(ve->refs == 0);
	vmspace_entry_delete(ve, vkp);
	return(0);
}

/*
 * Remove a vmspace_entry from the RB tree and destroy it.  We have to clean
 * up the pmap, the vm_map, then destroy the vmspace.
 *
 * This function must remove the ve immediately before it might potentially
 * block.
 *
 * The caller must hold vkp->token.
 */
static
void
vmspace_entry_delete(struct vmspace_entry *ve, struct vkernel_proc *vkp)
{
	RB_REMOVE(vmspace_rb_tree, &vkp->root, ve);

	pmap_remove_pages(vmspace_pmap(ve->vmspace),
			  VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	vm_map_remove(&ve->vmspace->vm_map,
		      VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	sysref_put(&ve->vmspace->vm_sysref);
	kfree(ve, M_VKERNEL);
}

/*
 * Locate the ve for (id), return the ve or NULL.  Note that this function
 * does not bump ve->refs; callers that may block must take a temporary
 * reference themselves to keep the ve from being destroyed out from
 * under them.
 *
 * The caller must hold vkp->token.
 */
static
struct vmspace_entry *
vkernel_find_vmspace(struct vkernel_proc *vkp, void *id)
{
	struct vmspace_entry *ve;
	struct vmspace_entry key;

	key.id = id;
	ve = RB_FIND(vmspace_rb_tree, &vkp->root, &key);
	return (ve);
}

/*
 * Manage vkernel refs, used by the kernel when fork()ing or exit()ing
 * a vkernel process.
 *
 * No requirements.
 */
void
vkernel_inherit(struct proc *p1, struct proc *p2)
{
	struct vkernel_proc *vkp;

	vkp = p1->p_vkernel;
	KKASSERT(vkp->refs > 0);
	atomic_add_int(&vkp->refs, 1);
	p2->p_vkernel = vkp;
}

/*
 * No requirements.
 */
void
vkernel_exit(struct proc *p)
{
	struct vkernel_proc *vkp;
	struct lwp *lp;

	vkp = p->p_vkernel;

	/*
	 * Restore the original VM context if we are killed while running
	 * a different one.
	 *
	 * This isn't supposed to happen.  What is supposed to happen is
	 * that the process should enter vkernel_trap() before handling
	 * the signal.
	 */
	RB_FOREACH(lp, lwp_rb_tree, &p->p_lwp_tree) {
		vkernel_lwp_exit(lp);
	}

	/*
	 * Dereference the common area
	 */
	p->p_vkernel = NULL;
	KKASSERT(vkp->refs > 0);

	if (atomic_fetchadd_int(&vkp->refs, -1) == 1) {
		lwkt_gettoken(&vkp->token);
		RB_SCAN(vmspace_rb_tree, &vkp->root, NULL,
			rb_vmspace_delete, vkp);
		lwkt_reltoken(&vkp->token);
		kfree(vkp, M_VKERNEL);
	}
}

/*
 * No requirements.
 */
void
vkernel_lwp_exit(struct lwp *lp)
{
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve;

	if ((vklp = lp->lwp_vkernel) != NULL) {
		if ((ve = vklp->ve) != NULL) {
			kprintf("Warning, pid %d killed with "
				"active VC!\n", lp->lwp_proc->p_pid);
			pmap_setlwpvm(lp, lp->lwp_proc->p_vmspace);
			vklp->ve = NULL;
			KKASSERT(ve->refs > 0);
			atomic_subtract_int(&ve->refs, 1);
		}
		lp->lwp_vkernel = NULL;
		kfree(vklp, M_VKERNEL);
	}
}

/*
 * A VM space under virtual kernel control trapped out or made a system call
 * or otherwise needs to return control to the virtual kernel context.
 *
 * No requirements.
 */
void
vkernel_trap(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;
	struct vmspace_entry *ve;
	struct vkernel_lwp *vklp;
	int error;

	/*
	 * Which vmspace entry was running?
	 */
	vklp = lp->lwp_vkernel;
	KKASSERT(vklp);
	ve = vklp->ve;
	KKASSERT(ve != NULL);

	/*
	 * Switch the LWP vmspace back to the virtual kernel's VM space.
	 */
	vklp->ve = NULL;
	pmap_setlwpvm(lp, p->p_vmspace);
	KKASSERT(ve->refs > 0);
	atomic_subtract_int(&ve->refs, 1);
	/* ve is invalid once we kill our ref */

	/*
	 * Copy the emulated process frame to the virtual kernel process.
	 * The emulated process cannot change TLS descriptors so don't
	 * bother saving them, we already have a copy.
	 *
	 * Restore the virtual kernel's saved context so the virtual kernel
	 * process can resume.
	 */
	error = copyout(frame, vklp->user_trapframe, sizeof(*frame));
	bcopy(&vklp->save_trapframe, frame, sizeof(*frame));
	bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
	      sizeof(vklp->save_vextframe.vx_tls));
	set_user_TLS();
	cpu_vkernel_trap(frame, error);
}