xref: /openbsd-src/sys/arch/i386/i386/vm_machdep.c (revision b3af768da0f4194b8b809a68f9cd547e117a1619)
1 /*	$OpenBSD: vm_machdep.c,v 1.74 2023/04/11 00:45:07 jsg Exp $	*/
2 /*	$NetBSD: vm_machdep.c,v 1.61 1996/05/03 19:42:35 christos Exp $	*/
3 
4 /*-
5  * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
6  * Copyright (c) 1982, 1986 The Regents of the University of California.
7  * Copyright (c) 1989, 1990 William Jolitz
8  * All rights reserved.
9  *
10  * This code is derived from software contributed to Berkeley by
11  * the Systems Programming Group of the University of Utah Computer
12  * Science Department, and William Jolitz.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
39  */
40 
41 /*
42  *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
43  */
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/proc.h>
48 #include <sys/buf.h>
49 #include <sys/user.h>
50 
51 #include <uvm/uvm_extern.h>
52 
53 #include "npx.h"
54 
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run.  The child does not return here; it resumes in
 * proc_trampoline, which invokes the supplied function with the
 * supplied argument.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, void *tcb,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

#if NNPX > 0
	/*
	 * Flush p1's live FPU state to its PCB first, so the PCB copy
	 * below captures up-to-date register contents.
	 */
	npxsave_proc(p1, 1);
#endif

	p2->p_md.md_flags = p1->p_md.md_flags;

#ifdef DIAGNOSTIC
	if (p1 != curproc && p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
	/* Start the child from a copy of the parent's PCB. */
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Place the child's kernel stack top at a randomized,
	 * stack-aligned offset below the end of its USPACE area.
	 */
	pcb->pcb_kstack = (int)p2->p_addr + USPACE - 16 -
	    (arc4random() & PAGE_MASK & ~_STACKALIGNBYTES);

	/*
	 * Copy the trapframe, and arrange for the child to return directly
	 */
	p2->p_md.md_regs = tf = (struct trapframe *)pcb->pcb_kstack - 1;
	*tf = *p1->p_md.md_regs;

	/*
	 * If specified, give the child a different stack and/or TCB
	 */
	if (stack != NULL)
		tf->tf_esp = (u_int)stack;
	if (tcb != NULL)
		i386_set_threadbase(p2, (uint32_t)tcb, TSEG_GS);

	/*
	 * Build a switchframe below the trapframe so the first switch
	 * to the child lands in proc_trampoline, which calls func(arg)
	 * (func/arg are carried in %esi/%ebx).
	 */
	sf = (struct switchframe *)tf - 1;
	sf->sf_esi = (int)func;
	sf->sf_ebx = (int)arg;
	sf->sf_eip = (int)proc_trampoline;
	pcb->pcb_esp = (int)sf;
	pcb->pcb_ebp = 0;
}
105 
106 /*
107  * cpu_exit is called as the last action during exit.
108  */
/*
 * Final machine-dependent teardown at process exit: drop any FPU
 * state still cached for the process, then hand off to the scheduler.
 */
void
cpu_exit(struct proc *p)
{
#if NNPX > 0
	struct pcb *pcb = &p->p_addr->u_pcb;

	/* If a CPU still holds our FPU context, discard it. */
	if (pcb->pcb_fpcpu != NULL)
		npxsave_proc(p, 0);
#endif
	sched_exit(p);
}
119 
120 /*
121  * Convert kernel VA to physical address
122  */
123 int
kvtop(caddr_t addr)124 kvtop(caddr_t addr)
125 {
126 	paddr_t pa;
127 
128 	if (pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa) == FALSE)
129 		panic("kvtop: zero page frame");
130 	return((int)pa);
131 }
132 
/*
 * km_alloc()/km_free() mode for the vmapbuf()/vunmapbuf() mappings:
 * carve VA from phys_map, sleeping until space is available.
 */
struct kmem_va_mode kv_physwait = {
	.kv_map = &phys_map,
	.kv_wait = 1,
};
137 
138 /*
139  * Map a user I/O request into kernel virtual address space.
140  * Note: the pages are already locked by uvm_vslock(), so we
141  * do not need to pass an access_type to pmap_enter().
142  */
143 void
vmapbuf(struct buf * bp,vsize_t len)144 vmapbuf(struct buf *bp, vsize_t len)
145 {
146 	vaddr_t faddr, taddr, off;
147 	paddr_t fpa;
148 
149 	if ((bp->b_flags & B_PHYS) == 0)
150 		panic("vmapbuf");
151 	faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
152 	off = (vaddr_t)bp->b_data - faddr;
153 	len = round_page(off + len);
154 	taddr = (vaddr_t)km_alloc(len, &kv_physwait, &kp_none, &kd_waitok);
155 	bp->b_data = (caddr_t)(taddr + off);
156 	/*
157 	 * The region is locked, so we expect that pmap_pte() will return
158 	 * non-NULL.
159 	 * XXX: unwise to expect this in a multithreaded environment.
160 	 * anything can happen to a pmap between the time we lock a
161 	 * region, release the pmap lock, and then relock it for
162 	 * the pmap_extract().
163 	 *
164 	 * no need to flush TLB since we expect nothing to be mapped
165 	 * where we just allocated (TLB will be flushed when our
166 	 * mapping is removed).
167 	 */
168 	while (len) {
169 		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
170 		    faddr, &fpa);
171 		pmap_kenter_pa(taddr, fpa, PROT_READ | PROT_WRITE);
172 		faddr += PAGE_SIZE;
173 		taddr += PAGE_SIZE;
174 		len -= PAGE_SIZE;
175 	}
176 	pmap_update(pmap_kernel());
177 }
178 
179 /*
180  * Unmap a previously-mapped user I/O request.
181  */
182 void
vunmapbuf(struct buf * bp,vsize_t len)183 vunmapbuf(struct buf *bp, vsize_t len)
184 {
185 	vaddr_t addr, off;
186 
187 	if ((bp->b_flags & B_PHYS) == 0)
188 		panic("vunmapbuf");
189 	addr = trunc_page((vaddr_t)bp->b_data);
190 	off = (vaddr_t)bp->b_data - addr;
191 	len = round_page(off + len);
192 	pmap_kremove(addr, len);
193 	pmap_update(pmap_kernel());
194 	km_free((void *)addr, len, &kv_physwait, &kp_none);
195 	bp->b_data = bp->b_saveaddr;
196 	bp->b_saveaddr = NULL;
197 }
198