/*	$NetBSD: vm_machdep.c,v 1.43 2024/01/19 03:35:31 thorpej Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	8.6 (Berkeley) 1/12/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.43 2024/01/19 03:35:31 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <machine/frame.h>
#include <machine/pte.h>
#include <machine/pcb.h>

#include <uvm/uvm_extern.h>

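/*
 * Inherit the machine-dependent process flags from the parent at
 * fork time.
 */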
void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{

	p2->p_md.mdp_flags = p1->p_md.mdp_flags;
}

/*
 * Finish a fork operation, with lwp l2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with l2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * l1 is the lwp being forked; if l1 == &lwp0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

	l2->l_md.md_flags = l1->l_md.md_flags;

	/* Copy pcb from lwp l1 to l2. */
	if (l1 == curlwp) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	} else {
		KASSERT(l1 == &lwp0);
	}

	*pcb2 = *pcb1;

	/*
	 * Copy the trap frame; it lives at the top of the child's
	 * kernel stack (u-area), so the child will return to user
	 * mode with a copy of the parent's register state.
	 */
	tf = (struct trapframe *)(uvm_lwp_getuarea(l2) + USPACE) - 1;
	l2->l_md.md_regs = (int *)tf;
	*tf = *(struct trapframe *)l1->l_md.md_regs;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_regs[15] = (u_int)stack + stacksize;	/* A7 */

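	/*
	 * Rig a switch frame just below the trap frame.  When the
	 * child is first switched to, cpu_switchto() resumes at
	 * sf_pc, i.e. in lwp_trampoline(), with func, arg and the
	 * new lwp in the callee-saved registers loaded from
	 * pcb_regs below.
	 */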
	sf = (struct switchframe *)tf - 1;
	sf->sf_pc = (u_int)lwp_trampoline;
	pcb2->pcb_regs[6] = (int)func;		/* A2 */
	pcb2->pcb_regs[7] = (int)arg;		/* A3 */
	pcb2->pcb_regs[8] = (int)l2;		/* A4 */
	pcb2->pcb_regs[11] = (int)sf;		/* SSP */
	pcb2->pcb_ps = PSL_LOWIPL;		/* start kthreads at IPL 0 */
}

void
cpu_lwp_free(struct lwp *l, int proc)
{

	/* Nothing to do */
}

void
cpu_lwp_free2(struct lwp *l)
{

	/* Nothing to do */
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *upmap, *kpmap __unused;
	vaddr_t uva;		/* User VA (map from) */
	vaddr_t kva;		/* Kernel VA (map to) */
	paddr_t pa;		/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = m68k_round_page(off + len);
	kva = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
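	/*
	 * Enter a wired mapping for each physical page backing the
	 * buffer.  On machines with a virtually addressed cache
	 * (M68K_VAC) go through pmap_enter() so that cache aliases
	 * are handled; otherwise the cheaper pmap_kenter_pa() will do.
	 */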
	do {
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
#ifdef M68K_VAC
		pmap_enter(kpmap, kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
#else
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
#endif
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(kpmap);

	return 0;
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = m68k_trunc_page(bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = m68k_round_page(off + len);

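	/* Tear down the wired mappings, then release the kernel VA. */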
#ifdef M68K_VAC
	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
#else
	pmap_kremove(kva, len);
#endif
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}