/*	$NetBSD: vm_machdep.c,v 1.9 2024/08/04 08:16:26 skrll Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.9 2024/08/04 08:16:26 skrll Exp $");

#define _PMAP_PRIVATE

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/vnode.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <uvm/uvm.h>

#include <dev/mm.h>

#include <riscv/frame.h>
#include <riscv/locore.h>
#include <riscv/machdep.h>

/*
 * cpu_lwp_fork: Finish a fork operation, with lwp l2 nearly set up.
 * Copy and update the pcb and trapframe, making the child ready to run.
 *
 * First LWP (l1) is the lwp being forked.  If it is &lwp0, then we are
 * creating a kthread, where return path and argument are specified
 * with `func' and `arg'.
 *
 * Rig the child's kernel stack so that it starts out in cpu_lwp_trampoline()
 * and calls child_return() with l2 as an argument. This causes the
 * newly-created child process to go directly to user level with an apparent
 * return value of 0 from fork(), while the parent process returns normally.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), then set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb * const pcb1 = lwp_getpcb(l1);
	struct pcb * const pcb2 = lwp_getpcb(l2);
	struct trapframe *tf;

	KASSERT(l1 == curlwp || l1 == &lwp0);
	KASSERT(l2->l_md.md_astpending == 0);

	/* Copy the PCB from parent. */
	*pcb2 = *pcb1;

	/*
	 * Copy the trapframe from the parent, so that the return to
	 * userspace will be to the right address, with correct registers.
	 */
	vaddr_t ua2 = uvm_lwp_getuarea(l2);

	tf = (struct trapframe *)(ua2 + USPACE) - 1;
	*tf = *l1->l_md.md_utf;
#ifdef FPE
	tf->tf_sr &= ~SR_FS;	/* floating point must be disabled */
#endif

	/* If specified, set a different user stack for a child. */
	if (stack != NULL) {
		tf->tf_sp = stack_align((intptr_t)stack + stacksize);
	}

	l2->l_md.md_utf = tf;

	/*
	 * Rig the kernel stack so that it starts out in cpu_lwp_trampoline()
	 * and calls child_return() with l2 as an argument.  This causes the
	 * newly-created child process to go directly to user level with an
	 * apparent return value of 0 from fork(), while the parent process
	 * returns normally.
	 */
	--tf;	/* cpu_switchto uses trapframes */

	tf->tf_s0 = 0;				/* S0 (aka frame pointer) */
	tf->tf_s1 = (intptr_t)func;		/* S1 */
	tf->tf_s2 = (intptr_t)arg;		/* S2 */
	tf->tf_ra = (intptr_t)lwp_trampoline;	/* RA */

	l2->l_md.md_ktf = tf;			/* SP */

	KASSERT(l2->l_md.md_astpending == 0);
}
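
/*
 * Illustrative sketch (not compiled): the hand-off contract that the
 * switch frame rigged above establishes.  When cpu_switchto() first
 * selects l2 it reloads s0/s1/s2/ra from l2->l_md.md_ktf and "returns"
 * into lwp_trampoline.  The real trampoline is assembly in locore.S;
 * the C rendering below, with its hypothetical prev/saved_s1/saved_s2
 * parameters standing in for the restored registers, is only meant to
 * make the three register assignments above readable.
 */
#if 0
static void
lwp_trampoline_sketch(struct lwp *prev, uintptr_t saved_s1, uintptr_t saved_s2)
{
	void (*func)(void *) = (void (*)(void *))saved_s1; /* tf->tf_s1 */
	void *arg = (void *)saved_s2;			   /* tf->tf_s2 */

	lwp_startup(prev, curlwp);	/* MI end-of-switch bookkeeping */
	(*func)(arg);			/* child_return() in the fork case */
}
#endif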

/*
 * Routine to copy MD stuff from proc to proc on a fork.
 */
void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{
}

#ifdef _LP64
void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, pmap_limits.avail_start,
	    pmap_limits.avail_end, USPACE_ALIGN, 0, &pglist, 1, 1);
	if (error) {
		return NULL;
	}

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
	KASSERTMSG(pa >= pmap_limits.avail_start,
	    "pa (%#"PRIxPADDR") < avail_start (%#"PRIxPADDR")",
	    pa, pmap_limits.avail_start);
	KASSERTMSG(pa + USPACE <= pmap_limits.avail_end,
	    "pa (%#"PRIxPADDR") >= avail_end (%#"PRIxPADDR")",
	    pa, pmap_limits.avail_end);

	/*
	 * We need to return a direct-mapped VA for the pa.
	 */
	return (void *)pmap_md_direct_map_paddr(pa);
}

/*
 * Return true if we freed it, false if we didn't.
 */
bool
cpu_uarea_free(void *va)
{
	if (!pmap_md_direct_mapped_vaddr_p((vaddr_t)va))
		return false;

	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr((vaddr_t)va);

	for (const paddr_t epa = pa + USPACE; pa < epa; pa += PAGE_SIZE) {
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg != NULL);
		uvm_pagefree(pg);
	}
	return true;
}
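
/*
 * Illustrative sketch (not compiled): the round-trip invariant the two
 * routines above depend on.  cpu_uarea_alloc() hands out a direct-mapped
 * VA, so cpu_uarea_free() can recover the physical address and release
 * the underlying pages.  example_uarea_check() is a hypothetical name.
 */
#if 0
static void
example_uarea_check(void)
{
	void *ua = cpu_uarea_alloc(false);

	if (ua != NULL) {
		/* The returned VA must lie inside the direct map... */
		KASSERT(pmap_md_direct_mapped_vaddr_p((vaddr_t)ua));
		/* ...and must translate back to a page-aligned PA. */
		const paddr_t pa =
		    pmap_md_direct_mapped_vaddr_to_paddr((vaddr_t)ua);
		KASSERT((pa & PAGE_MASK) == 0);
		/* Direct-mapped uareas are freed here, not by UVM. */
		const bool freed __diagused = cpu_uarea_free(ua);
		KASSERT(freed);
	}
}
#endif
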
#endif /* _LP64 */

void
cpu_lwp_free(struct lwp *l, int proc)
{

	(void)l;
}

vaddr_t
cpu_lwp_pc(struct lwp *l)
{
	return l->l_md.md_utf->tf_pc;
}

void
cpu_lwp_free2(struct lwp *l)
{

	(void)l;
}

/*
 * Map a user I/O request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;	/* kernel VA of the new mapping */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	vaddr_t uva = trunc_page((vaddr_t)bp->b_data);
	const vaddr_t off = (vaddr_t)bp->b_data - uva;
	len = round_page(off + len);

	kva = uvm_km_alloc(phys_map, len, atop(uva) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	KASSERT((atop(kva ^ uva) & uvmexp.colormask) == 0);
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(kva + off);
	struct pmap * const upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	do {
		paddr_t pa;	/* physical address */
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(pmap_kernel());

	return 0;
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;

	KASSERT(bp->b_flags & B_PHYS);

	kva = trunc_page((vaddr_t)bp->b_data);
	len = round_page((vaddr_t)bp->b_data - kva + len);
	pmap_kremove(kva, len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
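
/*
 * Illustrative sketch (not compiled): how a physio-style caller is
 * expected to bracket a raw transfer with vmapbuf()/vunmapbuf().  The
 * real consumer is physio() in kern/kern_physio.c; example_raw_io()
 * and example_device_io() are hypothetical names.
 */
#if 0
static int example_device_io(struct buf *);	/* hypothetical transfer */

static int
example_raw_io(struct buf *bp, vsize_t len)
{
	int error;

	bp->b_flags |= B_PHYS;		/* vmapbuf() panics without B_PHYS */
	error = vmapbuf(bp, len);	/* bp->b_data becomes a kernel VA */
	if (error != 0)
		return error;
	error = example_device_io(bp);	/* device sees wired kernel pages */
	vunmapbuf(bp, len);		/* restores bp->b_data, drops the VA */
	return error;
}
#endif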

int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{
	return (atop(pa) < physmem) ? 0 : EFAULT;
}

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
bool
mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
{
	if (pa >= physical_start && pa < physical_end) {
		*vap = pmap_md_direct_map_paddr(pa);
		return true;
	}

	return false;
}
#endif
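
/*
 * Illustrative sketch (not compiled): how a /dev/mem-style consumer in
 * dev/mm.c is assumed to combine the two helpers above: mm_md_physacc()
 * validates the PA, then mm_md_direct_mapped_phys() yields a VA that can
 * be copied through without a temporary mapping.  example_readmem() is a
 * hypothetical name.
 */
#if 0
static int
example_readmem(paddr_t pa, struct uio *uio)
{
	vaddr_t va;

	if (mm_md_physacc(pa, VM_PROT_READ) != 0)
		return EFAULT;		/* pa is not ordinary RAM */
	if (mm_md_direct_mapped_phys(pa, &va)) {
		/* Already mapped: copy straight out of the direct map. */
		return uiomove((void *)va, PAGE_SIZE, uio);
	}
	return EIO;	/* a real reader would fall back to a bounce map */
}
#endif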
283