xref: /openbsd-src/sys/uvm/uvm_mmap.c (revision 1a8dbaac879b9f3335ad7fb25429ce63ac1d6bac)
1 /*	$OpenBSD: uvm_mmap.c,v 1.163 2020/10/07 12:26:20 mpi Exp $	*/
2 /*	$NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $	*/
3 
4 /*
5  * Copyright (c) 1997 Charles D. Cranor and Washington University.
6  * Copyright (c) 1991, 1993 The Regents of the University of California.
7  * Copyright (c) 1988 University of Utah.
8  *
9  * All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * the Systems Programming Group of the University of Utah Computer
13  * Science Department.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. All advertising materials mentioning features or use of this software
24  *    must display the following acknowledgement:
25  *      This product includes software developed by the Charles D. Cranor,
26  *	Washington University, University of California, Berkeley and
27  *	its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
45  *      @(#)vm_mmap.c   8.5 (Berkeley) 5/19/94
46  * from: Id: uvm_mmap.c,v 1.1.2.14 1998/01/05 21:04:26 chuck Exp
47  */
48 
49 /*
50  * uvm_mmap.c: system call interface into VM system, plus kernel vm_mmap
51  * function.
52  */
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/fcntl.h>
56 #include <sys/file.h>
57 #include <sys/filedesc.h>
58 #include <sys/resourcevar.h>
59 #include <sys/mman.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/malloc.h>
63 #include <sys/vnode.h>
64 #include <sys/conf.h>
65 #include <sys/signalvar.h>
66 #include <sys/syslog.h>
67 #include <sys/stat.h>
68 #include <sys/specdev.h>
69 #include <sys/stdint.h>
70 #include <sys/pledge.h>
71 #include <sys/unistd.h>		/* for KBIND* */
72 #include <sys/user.h>
73 
74 #include <machine/exec.h>	/* for __LDPGSZ */
75 
76 #include <sys/syscallargs.h>
77 
78 #include <uvm/uvm.h>
79 #include <uvm/uvm_device.h>
80 #include <uvm/uvm_vnode.h>
81 
82 int uvm_mmapanon(vm_map_t, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int,
83     vsize_t, struct proc *);
84 int uvm_mmapfile(vm_map_t, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int,
85     struct vnode *, voff_t, vsize_t, struct proc *);
86 
87 
88 /*
89  * Page align addr and size, returning EINVAL on wraparound.
90  */
91 #define ALIGN_ADDR(addr, size, pageoff)	do {				\
92 	pageoff = (addr & PAGE_MASK);					\
93 	if (pageoff != 0) {						\
94 		if (size > SIZE_MAX - pageoff)				\
95 			return (EINVAL);	/* wraparound */	\
96 		addr -= pageoff;					\
97 		size += pageoff;					\
98 	}								\
99 	if (size != 0) {						\
100 		size = (vsize_t)round_page(size);			\
101 		if (size == 0)						\
102 			return (EINVAL);	/* wraparound */	\
103 	}								\
104 } while (0)
105 
106 /*
107  * sys_mquery: provide mapping hints to applications that do fixed mappings
108  *
109  * flags: 0 or MAP_FIXED (MAP_FIXED - means that we insist on this addr and
110  *	don't care about PMAP_PREFER or such)
111  * addr: hint where we'd like to place the mapping.
112  * size: size of the mapping
113  * fd: fd of the file we want to map
114  * off: offset within the file
115  */
116 int
117 sys_mquery(struct proc *p, void *v, register_t *retval)
118 {
119 	struct sys_mquery_args /* {
120 		syscallarg(void *) addr;
121 		syscallarg(size_t) len;
122 		syscallarg(int) prot;
123 		syscallarg(int) flags;
124 		syscallarg(int) fd;
125 		syscallarg(long) pad;
126 		syscallarg(off_t) pos;
127 	} */ *uap = v;
128 	struct file *fp;
129 	voff_t uoff;
130 	int error;
131 	vaddr_t vaddr;
132 	int flags = 0;
133 	vsize_t size;
134 	vm_prot_t prot;
135 	int fd;
136 
137 	vaddr = (vaddr_t) SCARG(uap, addr);
138 	prot = SCARG(uap, prot);
139 	size = (vsize_t) SCARG(uap, len);
140 	fd = SCARG(uap, fd);
141 
142 	if ((prot & PROT_MASK) != prot)
143 		return (EINVAL);
144 
145 	if (SCARG(uap, flags) & MAP_FIXED)
146 		flags |= UVM_FLAG_FIXED;
147 
148 	if (fd >= 0) {
149 		if ((error = getvnode(p, fd, &fp)) != 0)
150 			return (error);
151 		uoff = SCARG(uap, pos);
152 	} else {
153 		fp = NULL;
154 		uoff = UVM_UNKNOWN_OFFSET;
155 	}
156 
157 	if (vaddr == 0)
158 		vaddr = uvm_map_hint(p->p_vmspace, prot, VM_MIN_ADDRESS,
159 		    VM_MAXUSER_ADDRESS);
160 
161 	error = uvm_map_mquery(&p->p_vmspace->vm_map, &vaddr, size, uoff,
162 	    flags);
163 	if (error == 0)
164 		*retval = (register_t)(vaddr);
165 
166 	if (fp != NULL)
167 		FRELE(fp, p);
168 	return (error);
169 }
170 
171 int	uvm_wxabort;
172 
173 /*
174  * W^X violations are only allowed on permitted filesystems.
175  */
176 static inline int
177 uvm_wxcheck(struct proc *p, char *call)
178 {
179 	struct process *pr = p->p_p;
180 	int wxallowed = (pr->ps_textvp->v_mount &&
181 	    (pr->ps_textvp->v_mount->mnt_flag & MNT_WXALLOWED));
182 
183 	if (wxallowed && (pr->ps_flags & PS_WXNEEDED))
184 		return (0);
185 
186 	if (uvm_wxabort) {
187 		/* Report W^X failures */
188 		if (pr->ps_wxcounter++ == 0)
189 			log(LOG_NOTICE, "%s(%d): %s W^X violation\n",
190 			    pr->ps_comm, pr->ps_pid, call);
191 		/* Send uncatchable SIGABRT for coredump */
192 		sigexit(p, SIGABRT);
193 	}
194 
195 	return (ENOTSUP);
196 }
197 
198 /*
199  * sys_mmap: mmap system call.
200  *
201  * => file offset and address may not be page aligned
202  *    - if MAP_FIXED, offset and address must have remainder mod PAGE_SIZE
203  *    - if address isn't page aligned the mapping starts at trunc_page(addr)
204  *      and the return value is adjusted up by the page offset.
205  */
206 int
207 sys_mmap(struct proc *p, void *v, register_t *retval)
208 {
209 	struct sys_mmap_args /* {
210 		syscallarg(void *) addr;
211 		syscallarg(size_t) len;
212 		syscallarg(int) prot;
213 		syscallarg(int) flags;
214 		syscallarg(int) fd;
215 		syscallarg(long) pad;
216 		syscallarg(off_t) pos;
217 	} */ *uap = v;
218 	vaddr_t addr;
219 	struct vattr va;
220 	off_t pos;
221 	vsize_t limit, pageoff, size;
222 	vm_prot_t prot, maxprot;
223 	int flags, fd;
224 	vaddr_t vm_min_address = VM_MIN_ADDRESS;
225 	struct filedesc *fdp = p->p_fd;
226 	struct file *fp = NULL;
227 	struct vnode *vp;
228 	int error;
229 
230 	/* first, extract syscall args from the uap. */
231 	addr = (vaddr_t) SCARG(uap, addr);
232 	size = (vsize_t) SCARG(uap, len);
233 	prot = SCARG(uap, prot);
234 	flags = SCARG(uap, flags);
235 	fd = SCARG(uap, fd);
236 	pos = SCARG(uap, pos);
237 
238 	/*
239 	 * Validate the flags.
240 	 */
241 	if ((prot & PROT_MASK) != prot)
242 		return (EINVAL);
243 	if ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC) &&
244 	    (error = uvm_wxcheck(p, "mmap")))
245 		return (error);
246 
247 	if ((flags & MAP_FLAGMASK) != flags)
248 		return (EINVAL);
249 	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
250 		return (EINVAL);
251 	if ((flags & (MAP_FIXED|__MAP_NOREPLACE)) == __MAP_NOREPLACE)
252 		return (EINVAL);
253 	if (flags & MAP_STACK) {
254 		if ((flags & (MAP_ANON|MAP_PRIVATE)) != (MAP_ANON|MAP_PRIVATE))
255 			return (EINVAL);
256 		if (flags & ~(MAP_STACK|MAP_FIXED|MAP_ANON|MAP_PRIVATE))
257 			return (EINVAL);
258 		if (pos != 0)
259 			return (EINVAL);
260 		if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE))
261 			return (EINVAL);
262 	}
263 	if (size == 0)
264 		return (EINVAL);
265 
266 	error = pledge_protexec(p, prot);
267 	if (error)
268 		return (error);
269 
270 	/* align file position and save offset.  adjust size. */
271 	ALIGN_ADDR(pos, size, pageoff);
272 
273 	/* now check (MAP_FIXED) or get (!MAP_FIXED) the "addr" */
274 	if (flags & MAP_FIXED) {
275 		/* adjust address by the same amount as we did the offset */
276 		addr -= pageoff;
277 		if (addr & PAGE_MASK)
278 			return (EINVAL);		/* not page aligned */
279 
280 		if (addr > SIZE_MAX - size)
281 			return (EINVAL);		/* no wrapping! */
282 		if (VM_MAXUSER_ADDRESS > 0 &&
283 		    (addr + size) > VM_MAXUSER_ADDRESS)
284 			return (EINVAL);
285 		if (vm_min_address > 0 && addr < vm_min_address)
286 			return (EINVAL);
287 	}
288 
289 	/* check for file mappings (i.e. not anonymous) and verify file. */
290 	if ((flags & MAP_ANON) == 0) {
291 		KERNEL_LOCK();
292 		if ((fp = fd_getfile(fdp, fd)) == NULL) {
293 			error = EBADF;
294 			goto out;
295 		}
296 
297 		if (fp->f_type != DTYPE_VNODE) {
298 			error = ENODEV;		/* only mmap vnodes! */
299 			goto out;
300 		}
301 		vp = (struct vnode *)fp->f_data;	/* convert to vnode */
302 
303 		if (vp->v_type != VREG && vp->v_type != VCHR &&
304 		    vp->v_type != VBLK) {
305 			error = ENODEV; /* only REG/CHR/BLK support mmap */
306 			goto out;
307 		}
308 
309 		if (vp->v_type == VREG && (pos + size) < pos) {
310 			error = EINVAL;		/* no offset wrapping */
311 			goto out;
312 		}
313 
314 		/* special case: catch SunOS style /dev/zero */
315 		if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
316 			flags |= MAP_ANON;
317 			FRELE(fp, p);
318 			fp = NULL;
319 			KERNEL_UNLOCK();
320 			goto is_anon;
321 		}
322 
323 		/*
324 		 * Old programs may not select a specific sharing type, so
325 		 * default to an appropriate one.
326 		 */
327 		if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
328 #if defined(DEBUG)
329 			printf("WARNING: defaulted mmap() share type to"
330 			    " %s (pid %d comm %s)\n",
331 			    vp->v_type == VCHR ? "MAP_SHARED" : "MAP_PRIVATE",
332 			    p->p_p->ps_pid, p->p_p->ps_comm);
333 #endif
334 			if (vp->v_type == VCHR)
335 				flags |= MAP_SHARED;	/* for a device */
336 			else
337 				flags |= MAP_PRIVATE;	/* for a file */
338 		}
339 
340 		/*
341 		 * MAP_PRIVATE device mappings don't make sense (and aren't
342 		 * supported anyway).  However, some programs rely on this,
343 		 * so just change it to MAP_SHARED.
344 		 */
345 		if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
346 			flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
347 		}
348 
349 		/* now check protection */
350 		maxprot = PROT_EXEC;
351 
352 		/* check read access */
353 		if (fp->f_flag & FREAD)
354 			maxprot |= PROT_READ;
355 		else if (prot & PROT_READ) {
356 			error = EACCES;
357 			goto out;
358 		}
359 
360 		/* check write access, shared case first */
361 		if (flags & MAP_SHARED) {
362 			/*
363 			 * if the file is writable, only add PROT_WRITE to
364 			 * maxprot if the file is not immutable, append-only.
365 			 * otherwise, if we have asked for PROT_WRITE, return
366 			 * EPERM.
367 			 */
368 			if (fp->f_flag & FWRITE) {
369 				error = VOP_GETATTR(vp, &va, p->p_ucred, p);
370 				if (error)
371 					goto out;
372 				if ((va.va_flags & (IMMUTABLE|APPEND)) == 0)
373 					maxprot |= PROT_WRITE;
374 				else if (prot & PROT_WRITE) {
375 					error = EPERM;
376 					goto out;
377 				}
378 			} else if (prot & PROT_WRITE) {
379 				error = EACCES;
380 				goto out;
381 			}
382 		} else {
383 			/* MAP_PRIVATE mappings can always write to */
384 			maxprot |= PROT_WRITE;
385 		}
386 		if ((flags & __MAP_NOFAULT) != 0 ||
387 		    ((flags & MAP_PRIVATE) != 0 && (prot & PROT_WRITE) != 0)) {
388 			limit = lim_cur(RLIMIT_DATA);
389 			if (limit < size ||
390 			    limit - size < ptoa(p->p_vmspace->vm_dused)) {
391 				error = ENOMEM;
392 				goto out;
393 			}
394 		}
395 		error = uvm_mmapfile(&p->p_vmspace->vm_map, &addr, size, prot,
396 		    maxprot, flags, vp, pos, lim_cur(RLIMIT_MEMLOCK), p);
397 		FRELE(fp, p);
398 		KERNEL_UNLOCK();
399 	} else {		/* MAP_ANON case */
400 		if (fd != -1)
401 			return EINVAL;
402 
403 is_anon:	/* label for SunOS style /dev/zero */
404 
405 		/* __MAP_NOFAULT only makes sense with a backing object */
406 		if ((flags & __MAP_NOFAULT) != 0)
407 			return EINVAL;
408 
409 		if (prot != PROT_NONE || (flags & MAP_SHARED)) {
410 			limit = lim_cur(RLIMIT_DATA);
411 			if (limit < size ||
412 			    limit - size < ptoa(p->p_vmspace->vm_dused)) {
413 				return ENOMEM;
414 			}
415 		}
416 
417 		/*
418 		 * We've been treating (MAP_SHARED|MAP_PRIVATE) == 0 as
419 		 * MAP_PRIVATE, so make that clear.
420 		 */
421 		if ((flags & MAP_SHARED) == 0)
422 			flags |= MAP_PRIVATE;
423 
424 		maxprot = PROT_MASK;
425 		error = uvm_mmapanon(&p->p_vmspace->vm_map, &addr, size, prot,
426 		    maxprot, flags, lim_cur(RLIMIT_MEMLOCK), p);
427 	}
428 
429 	if (error == 0)
430 		/* remember to add offset */
431 		*retval = (register_t)(addr + pageoff);
432 
433 	return (error);
434 
435 out:
436 	KERNEL_UNLOCK();
437 	if (fp)
438 		FRELE(fp, p);
439 	return (error);
440 }
441 
442 /*
443  * sys_msync: the msync system call (a front-end for flush)
444  */
445 
446 int
447 sys_msync(struct proc *p, void *v, register_t *retval)
448 {
449 	struct sys_msync_args /* {
450 		syscallarg(void *) addr;
451 		syscallarg(size_t) len;
452 		syscallarg(int) flags;
453 	} */ *uap = v;
454 	vaddr_t addr;
455 	vsize_t size, pageoff;
456 	vm_map_t map;
457 	int flags, uvmflags;
458 
459 	/* extract syscall args from the uap */
460 	addr = (vaddr_t)SCARG(uap, addr);
461 	size = (vsize_t)SCARG(uap, len);
462 	flags = SCARG(uap, flags);
463 
464 	/* sanity check flags */
465 	if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
466 			(flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
467 			(flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
468 		return (EINVAL);
469 	if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
470 		flags |= MS_SYNC;
471 
472 	/* align the address to a page boundary, and adjust the size accordingly */
473 	ALIGN_ADDR(addr, size, pageoff);
474 	if (addr > SIZE_MAX - size)
475 		return (EINVAL);		/* disallow wrap-around. */
476 
477 	/* get map */
478 	map = &p->p_vmspace->vm_map;
479 
480 	/* translate MS_ flags into PGO_ flags */
481 	uvmflags = PGO_CLEANIT;
482 	if (flags & MS_INVALIDATE)
483 		uvmflags |= PGO_FREE;
484 	if (flags & MS_SYNC)
485 		uvmflags |= PGO_SYNCIO;
486 	else
487 		uvmflags |= PGO_SYNCIO;	 /* XXXCDC: force sync for now! */
488 
489 	return (uvm_map_clean(map, addr, addr+size, uvmflags));
490 }
491 
492 /*
493  * sys_munmap: unmap a users memory
494  */
495 int
496 sys_munmap(struct proc *p, void *v, register_t *retval)
497 {
498 	struct sys_munmap_args /* {
499 		syscallarg(void *) addr;
500 		syscallarg(size_t) len;
501 	} */ *uap = v;
502 	vaddr_t addr;
503 	vsize_t size, pageoff;
504 	vm_map_t map;
505 	vaddr_t vm_min_address = VM_MIN_ADDRESS;
506 	struct uvm_map_deadq dead_entries;
507 
508 	/* get syscall args... */
509 	addr = (vaddr_t) SCARG(uap, addr);
510 	size = (vsize_t) SCARG(uap, len);
511 
512 	/* align address to a page boundary, and adjust size accordingly */
513 	ALIGN_ADDR(addr, size, pageoff);
514 
515 	/*
516 	 * Check for illegal addresses.  Watch out for address wrap...
517 	 * Note that VM_*_ADDRESS are not constants due to casts (argh).
518 	 */
519 	if (addr > SIZE_MAX - size)
520 		return (EINVAL);
521 	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
522 		return (EINVAL);
523 	if (vm_min_address > 0 && addr < vm_min_address)
524 		return (EINVAL);
525 	map = &p->p_vmspace->vm_map;
526 
527 
528 	vm_map_lock(map);	/* lock map so we can checkprot */
529 
530 	/*
531 	 * interesting system call semantic: make sure entire range is
532 	 * allocated before allowing an unmap.
533 	 */
534 	if (!uvm_map_checkprot(map, addr, addr + size, PROT_NONE)) {
535 		vm_map_unlock(map);
536 		return (EINVAL);
537 	}
538 
539 	TAILQ_INIT(&dead_entries);
540 	uvm_unmap_remove(map, addr, addr + size, &dead_entries, FALSE, TRUE);
541 	vm_map_unlock(map);	/* and unlock */
542 
543 	uvm_unmap_detach(&dead_entries, 0);
544 
545 	return (0);
546 }
547 
548 /*
549  * sys_mprotect: the mprotect system call
550  */
551 int
552 sys_mprotect(struct proc *p, void *v, register_t *retval)
553 {
554 	struct sys_mprotect_args /* {
555 		syscallarg(void *) addr;
556 		syscallarg(size_t) len;
557 		syscallarg(int) prot;
558 	} */ *uap = v;
559 	vaddr_t addr;
560 	vsize_t size, pageoff;
561 	vm_prot_t prot;
562 	int error;
563 
564 	/*
565 	 * extract syscall args from uap
566 	 */
567 
568 	addr = (vaddr_t)SCARG(uap, addr);
569 	size = (vsize_t)SCARG(uap, len);
570 	prot = SCARG(uap, prot);
571 
572 	if ((prot & PROT_MASK) != prot)
573 		return (EINVAL);
574 	if ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC) &&
575 	    (error = uvm_wxcheck(p, "mprotect")))
576 		return (error);
577 
578 	error = pledge_protexec(p, prot);
579 	if (error)
580 		return (error);
581 
582 	/*
583 	 * align the address to a page boundary, and adjust the size accordingly
584 	 */
585 	ALIGN_ADDR(addr, size, pageoff);
586 	if (addr > SIZE_MAX - size)
587 		return (EINVAL);		/* disallow wrap-around. */
588 
589 	return (uvm_map_protect(&p->p_vmspace->vm_map, addr, addr+size,
590 	    prot, FALSE));
591 }
592 
593 /*
594  * sys_msyscall: the msyscall system call
595  */
596 int
597 sys_msyscall(struct proc *p, void *v, register_t *retval)
598 {
599 	struct sys_msyscall_args /* {
600 		syscallarg(void *) addr;
601 		syscallarg(size_t) len;
602 	} */ *uap = v;
603 	vaddr_t addr;
604 	vsize_t size, pageoff;
605 
606 	addr = (vaddr_t)SCARG(uap, addr);
607 	size = (vsize_t)SCARG(uap, len);
608 
609 	/*
610 	 * align the address to a page boundary, and adjust the size accordingly
611 	 */
612 	ALIGN_ADDR(addr, size, pageoff);
613 	if (addr > SIZE_MAX - size)
614 		return (EINVAL);		/* disallow wrap-around. */
615 
616 	return (uvm_map_syscall(&p->p_vmspace->vm_map, addr, addr+size));
617 }
618 
619 /*
620  * sys_minherit: the minherit system call
621  */
622 int
623 sys_minherit(struct proc *p, void *v, register_t *retval)
624 {
625 	struct sys_minherit_args /* {
626 		syscallarg(void *) addr;
627 		syscallarg(size_t) len;
628 		syscallarg(int) inherit;
629 	} */ *uap = v;
630 	vaddr_t addr;
631 	vsize_t size, pageoff;
632 	vm_inherit_t inherit;
633 
634 	addr = (vaddr_t)SCARG(uap, addr);
635 	size = (vsize_t)SCARG(uap, len);
636 	inherit = SCARG(uap, inherit);
637 
638 	/*
639 	 * align the address to a page boundary, and adjust the size accordingly
640 	 */
641 	ALIGN_ADDR(addr, size, pageoff);
642 	if (addr > SIZE_MAX - size)
643 		return (EINVAL);		/* disallow wrap-around. */
644 
645 	return (uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr+size,
646 	    inherit));
647 }
648 
649 /*
650  * sys_madvise: give advice about memory usage.
651  */
652 /* ARGSUSED */
653 int
654 sys_madvise(struct proc *p, void *v, register_t *retval)
655 {
656 	struct sys_madvise_args /* {
657 		syscallarg(void *) addr;
658 		syscallarg(size_t) len;
659 		syscallarg(int) behav;
660 	} */ *uap = v;
661 	vaddr_t addr;
662 	vsize_t size, pageoff;
663 	int advice, error;
664 
665 	addr = (vaddr_t)SCARG(uap, addr);
666 	size = (vsize_t)SCARG(uap, len);
667 	advice = SCARG(uap, behav);
668 
669 	/*
670 	 * align the address to a page boundary, and adjust the size accordingly
671 	 */
672 	ALIGN_ADDR(addr, size, pageoff);
673 	if (addr > SIZE_MAX - size)
674 		return (EINVAL);		/* disallow wrap-around. */
675 
676 	switch (advice) {
677 	case MADV_NORMAL:
678 	case MADV_RANDOM:
679 	case MADV_SEQUENTIAL:
680 		error = uvm_map_advice(&p->p_vmspace->vm_map, addr,
681 		    addr + size, advice);
682 		break;
683 
684 	case MADV_WILLNEED:
685 		/*
686 		 * Activate all these pages, pre-faulting them in if
687 		 * necessary.
688 		 */
689 		/*
690 		 * XXX IMPLEMENT ME.
691 		 * Should invent a "weak" mode for uvm_fault()
692 		 * which would only do the PGO_LOCKED pgo_get().
693 		 */
694 		return (0);
695 
696 	case MADV_DONTNEED:
697 		/*
698 		 * Deactivate all these pages.  We don't need them
699 		 * any more.  We don't, however, toss the data in
700 		 * the pages.
701 		 */
702 		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
703 		    PGO_DEACTIVATE);
704 		break;
705 
706 	case MADV_FREE:
707 		/*
708 		 * These pages contain no valid data, and may be
709 		 * garbage-collected.  Toss all resources, including
710 		 * any swap space in use.
711 		 */
712 		error = uvm_map_clean(&p->p_vmspace->vm_map, addr, addr + size,
713 		    PGO_FREE);
714 		break;
715 
716 	case MADV_SPACEAVAIL:
717 		/*
718 		 * XXXMRG What is this?  I think it's:
719 		 *
720 		 *	Ensure that we have allocated backing-store
721 		 *	for these pages.
722 		 *
723 		 * This is going to require changes to the page daemon,
724 		 * as it will free swap space allocated to pages in core.
725 		 * There's also what to do for device/file/anonymous memory.
726 		 */
727 		return (EINVAL);
728 
729 	default:
730 		return (EINVAL);
731 	}
732 
733 	return (error);
734 }
735 
736 /*
737  * sys_mlock: memory lock
738  */
739 
740 int
741 sys_mlock(struct proc *p, void *v, register_t *retval)
742 {
743 	struct sys_mlock_args /* {
744 		syscallarg(const void *) addr;
745 		syscallarg(size_t) len;
746 	} */ *uap = v;
747 	vaddr_t addr;
748 	vsize_t size, pageoff;
749 	int error;
750 
751 	/* extract syscall args from uap */
752 	addr = (vaddr_t)SCARG(uap, addr);
753 	size = (vsize_t)SCARG(uap, len);
754 
755 	/* align address to a page boundary and adjust size accordingly */
756 	ALIGN_ADDR(addr, size, pageoff);
757 	if (addr > SIZE_MAX - size)
758 		return (EINVAL);		/* disallow wrap-around. */
759 
760 	if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
761 		return (EAGAIN);
762 
763 #ifdef pmap_wired_count
764 	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
765 			lim_cur(RLIMIT_MEMLOCK))
766 		return (EAGAIN);
767 #else
768 	if ((error = suser(p)) != 0)
769 		return (error);
770 #endif
771 
772 	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
773 	    0);
774 	return (error == 0 ? 0 : ENOMEM);
775 }
776 
777 /*
778  * sys_munlock: unlock wired pages
779  */
780 
781 int
782 sys_munlock(struct proc *p, void *v, register_t *retval)
783 {
784 	struct sys_munlock_args /* {
785 		syscallarg(const void *) addr;
786 		syscallarg(size_t) len;
787 	} */ *uap = v;
788 	vaddr_t addr;
789 	vsize_t size, pageoff;
790 	int error;
791 
792 	/* extract syscall args from uap */
793 	addr = (vaddr_t)SCARG(uap, addr);
794 	size = (vsize_t)SCARG(uap, len);
795 
796 	/* align address to a page boundary, and adjust size accordingly */
797 	ALIGN_ADDR(addr, size, pageoff);
798 	if (addr > SIZE_MAX - size)
799 		return (EINVAL);		/* disallow wrap-around. */
800 
801 #ifndef pmap_wired_count
802 	if ((error = suser(p)) != 0)
803 		return (error);
804 #endif
805 
806 	error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
807 	    0);
808 	return (error == 0 ? 0 : ENOMEM);
809 }
810 
811 /*
812  * sys_mlockall: lock all pages mapped into an address space.
813  */
814 int
815 sys_mlockall(struct proc *p, void *v, register_t *retval)
816 {
817 	struct sys_mlockall_args /* {
818 		syscallarg(int) flags;
819 	} */ *uap = v;
820 	int error, flags;
821 
822 	flags = SCARG(uap, flags);
823 
824 	if (flags == 0 ||
825 	    (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
826 		return (EINVAL);
827 
828 #ifndef pmap_wired_count
829 	if ((error = suser(p)) != 0)
830 		return (error);
831 #endif
832 
833 	error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
834 	    lim_cur(RLIMIT_MEMLOCK));
835 	if (error != 0 && error != ENOMEM)
836 		return (EAGAIN);
837 	return (error);
838 }
839 
840 /*
841  * sys_munlockall: unlock all pages mapped into an address space.
842  */
843 int
844 sys_munlockall(struct proc *p, void *v, register_t *retval)
845 {
846 
847 	(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
848 	return (0);
849 }
850 
851 /*
852  * common code for mmapanon and mmapfile to lock a mmaping
853  */
854 int
855 uvm_mmaplock(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
856     vsize_t locklimit)
857 {
858 	int error;
859 
860 	/*
861 	 * POSIX 1003.1b -- if our address space was configured
862 	 * to lock all future mappings, wire the one we just made.
863 	 */
864 	if (prot == PROT_NONE) {
865 		/*
866 		 * No more work to do in this case.
867 		 */
868 		return (0);
869 	}
870 
871 	vm_map_lock(map);
872 	if (map->flags & VM_MAP_WIREFUTURE) {
873 		KERNEL_LOCK();
874 		if ((atop(size) + uvmexp.wired) > uvmexp.wiredmax
875 #ifdef pmap_wired_count
876 		    || (locklimit != 0 && (size +
877 			 ptoa(pmap_wired_count(vm_map_pmap(map)))) >
878 			locklimit)
879 #endif
880 		) {
881 			error = ENOMEM;
882 			vm_map_unlock(map);
883 			/* unmap the region! */
884 			uvm_unmap(map, *addr, *addr + size);
885 			KERNEL_UNLOCK();
886 			return (error);
887 		}
888 		/*
889 		 * uvm_map_pageable() always returns the map
890 		 * unlocked.
891 		 */
892 		error = uvm_map_pageable(map, *addr, *addr + size,
893 		    FALSE, UVM_LK_ENTER);
894 		if (error != 0) {
895 			/* unmap the region! */
896 			uvm_unmap(map, *addr, *addr + size);
897 			KERNEL_UNLOCK();
898 			return (error);
899 		}
900 		KERNEL_UNLOCK();
901 		return (0);
902 	}
903 	vm_map_unlock(map);
904 	return (0);
905 }
906 
907 /*
908  * uvm_mmapanon: internal version of mmap for anons
909  *
910  * - used by sys_mmap
911  */
912 int
913 uvm_mmapanon(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
914     vm_prot_t maxprot, int flags, vsize_t locklimit, struct proc *p)
915 {
916 	int error;
917 	int advice = MADV_NORMAL;
918 	unsigned int uvmflag = 0;
919 	vsize_t align = 0;	/* userland page size */
920 
921 	/*
922 	 * for non-fixed mappings, round off the suggested address.
923 	 * for fixed mappings, check alignment and zap old mappings.
924 	 */
925 	if ((flags & MAP_FIXED) == 0) {
926 		*addr = round_page(*addr);	/* round */
927 	} else {
928 		if (*addr & PAGE_MASK)
929 			return(EINVAL);
930 
931 		uvmflag |= UVM_FLAG_FIXED;
932 		if ((flags & __MAP_NOREPLACE) == 0)
933 			uvmflag |= UVM_FLAG_UNMAP;
934 	}
935 
936 	if ((flags & MAP_FIXED) == 0 && size >= __LDPGSZ)
937 		align = __LDPGSZ;
938 	if ((flags & MAP_SHARED) == 0)
939 		/* XXX: defer amap create */
940 		uvmflag |= UVM_FLAG_COPYONW;
941 	else
942 		/* shared: create amap now */
943 		uvmflag |= UVM_FLAG_OVERLAY;
944 	if (flags & MAP_STACK)
945 		uvmflag |= UVM_FLAG_STACK;
946 	if (flags & MAP_CONCEAL)
947 		uvmflag |= UVM_FLAG_CONCEAL;
948 
949 	/* set up mapping flags */
950 	uvmflag = UVM_MAPFLAG(prot, maxprot,
951 	    (flags & MAP_SHARED) ? MAP_INHERIT_SHARE : MAP_INHERIT_COPY,
952 	    advice, uvmflag);
953 
954 	error = uvm_mapanon(map, addr, size, align, uvmflag);
955 
956 	if (error == 0)
957 		error = uvm_mmaplock(map, addr, size, prot, locklimit);
958 	return error;
959 }
960 
961 /*
962  * uvm_mmapfile: internal version of mmap for non-anons
963  *
964  * - used by sys_mmap
965  * - caller must page-align the file offset
966  */
967 int
968 uvm_mmapfile(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
969     vm_prot_t maxprot, int flags, struct vnode *vp, voff_t foff,
970     vsize_t locklimit, struct proc *p)
971 {
972 	struct uvm_object *uobj;
973 	int error;
974 	int advice = MADV_NORMAL;
975 	unsigned int uvmflag = 0;
976 	vsize_t align = 0;	/* userland page size */
977 
978 	/*
979 	 * for non-fixed mappings, round off the suggested address.
980 	 * for fixed mappings, check alignment and zap old mappings.
981 	 */
982 	if ((flags & MAP_FIXED) == 0) {
983 		*addr = round_page(*addr);	/* round */
984 	} else {
985 		if (*addr & PAGE_MASK)
986 			return(EINVAL);
987 
988 		uvmflag |= UVM_FLAG_FIXED;
989 		if ((flags & __MAP_NOREPLACE) == 0)
990 			uvmflag |= UVM_FLAG_UNMAP;
991 	}
992 
993 	/*
994 	 * attach to underlying vm object.
995 	 */
996 	if (vp->v_type != VCHR) {
997 		uobj = uvn_attach(vp, (flags & MAP_SHARED) ?
998 		   maxprot : (maxprot & ~PROT_WRITE));
999 
1000 		/*
1001 		 * XXXCDC: hack from old code
1002 		 * don't allow vnodes which have been mapped
1003 		 * shared-writeable to persist [forces them to be
1004 		 * flushed out when last reference goes].
1005 		 * XXXCDC: interesting side effect: avoids a bug.
1006 		 * note that in WRITE [ufs_readwrite.c] that we
1007 		 * allocate buffer, uncache, and then do the write.
1008 		 * the problem with this is that if the uncache causes
1009 		 * VM data to be flushed to the same area of the file
1010 		 * we are writing to... in that case we've got the
1011 		 * buffer locked and our process goes to sleep forever.
1012 		 *
1013 		 * XXXCDC: checking maxprot protects us from the
1014 		 * "persistbug" program but this is not a long term
1015 		 * solution.
1016 		 *
1017 		 * XXXCDC: we don't bother calling uncache with the vp
1018 		 * VOP_LOCKed since we know that we are already
1019 		 * holding a valid reference to the uvn (from the
1020 		 * uvn_attach above), and thus it is impossible for
1021 		 * the uncache to kill the uvn and trigger I/O.
1022 		 */
1023 		if (flags & MAP_SHARED) {
1024 			if ((prot & PROT_WRITE) ||
1025 			    (maxprot & PROT_WRITE)) {
1026 				uvm_vnp_uncache(vp);
1027 			}
1028 		}
1029 	} else {
1030 		uobj = udv_attach(vp->v_rdev,
1031 		    (flags & MAP_SHARED) ? maxprot :
1032 		    (maxprot & ~PROT_WRITE), foff, size);
1033 		/*
1034 		 * XXX Some devices don't like to be mapped with
1035 		 * XXX PROT_EXEC, but we don't really have a
1036 		 * XXX better way of handling this, right now
1037 		 */
1038 		if (uobj == NULL && (prot & PROT_EXEC) == 0) {
1039 			maxprot &= ~PROT_EXEC;
1040 			uobj = udv_attach(vp->v_rdev,
1041 			    (flags & MAP_SHARED) ? maxprot :
1042 			    (maxprot & ~PROT_WRITE), foff, size);
1043 		}
1044 		advice = MADV_RANDOM;
1045 	}
1046 
1047 	if (uobj == NULL)
1048 		return((vp->v_type == VREG) ? ENOMEM : EINVAL);
1049 
1050 	if ((flags & MAP_SHARED) == 0)
1051 		uvmflag |= UVM_FLAG_COPYONW;
1052 	if (flags & __MAP_NOFAULT)
1053 		uvmflag |= (UVM_FLAG_NOFAULT | UVM_FLAG_OVERLAY);
1054 	if (flags & MAP_STACK)
1055 		uvmflag |= UVM_FLAG_STACK;
1056 	if (flags & MAP_CONCEAL)
1057 		uvmflag |= UVM_FLAG_CONCEAL;
1058 
1059 	/* set up mapping flags */
1060 	uvmflag = UVM_MAPFLAG(prot, maxprot,
1061 	    (flags & MAP_SHARED) ? MAP_INHERIT_SHARE : MAP_INHERIT_COPY,
1062 	    advice, uvmflag);
1063 
1064 	error = uvm_map(map, addr, size, uobj, foff, align, uvmflag);
1065 
1066 	if (error == 0)
1067 		return uvm_mmaplock(map, addr, size, prot, locklimit);
1068 
1069 	/* errors: first detach from the uobj, if any.  */
1070 	if (uobj)
1071 		uobj->pgops->pgo_detach(uobj);
1072 
1073 	return (error);
1074 }
1075 
1076 /* an address that can't be in userspace or kernelspace */
1077 #define	BOGO_PC	(u_long)-1
1078 int
1079 sys_kbind(struct proc *p, void *v, register_t *retval)
1080 {
1081 	struct sys_kbind_args /* {
1082 		syscallarg(const struct __kbind *) param;
1083 		syscallarg(size_t) psize;
1084 		syscallarg(uint64_t) proc_cookie;
1085 	} */ *uap = v;
1086 	const struct __kbind *paramp;
1087 	union {
1088 		struct __kbind uk[KBIND_BLOCK_MAX];
1089 		char upad[KBIND_BLOCK_MAX * sizeof(*paramp) + KBIND_DATA_MAX];
1090 	} param;
1091 	struct uvm_map_deadq dead_entries;
1092 	struct process *pr = p->p_p;
1093 	const char *data;
1094 	vaddr_t baseva, last_baseva, endva, pageoffset, kva;
1095 	size_t psize, s;
1096 	u_long pc;
1097 	int count, i;
1098 	int error;
1099 
1100 	/*
1101 	 * extract syscall args from uap
1102 	 */
1103 	paramp = SCARG(uap, param);
1104 	psize = SCARG(uap, psize);
1105 
1106 	/* a NULL paramp disables the syscall for the process */
1107 	if (paramp == NULL) {
1108 		pr->ps_kbind_addr = BOGO_PC;
1109 		return (0);
1110 	}
1111 
1112 	/* security checks */
1113 	pc = PROC_PC(p);
1114 	if (pr->ps_kbind_addr == 0) {
1115 		pr->ps_kbind_addr = pc;
1116 		pr->ps_kbind_cookie = SCARG(uap, proc_cookie);
1117 	} else if (pc != pr->ps_kbind_addr || pc == BOGO_PC)
1118 		sigexit(p, SIGILL);
1119 	else if (pr->ps_kbind_cookie != SCARG(uap, proc_cookie))
1120 		sigexit(p, SIGILL);
1121 	if (psize < sizeof(struct __kbind) || psize > sizeof(param))
1122 		return (EINVAL);
1123 	if ((error = copyin(paramp, &param, psize)))
1124 		return (error);
1125 
1126 	/*
1127 	 * The param argument points to an array of __kbind structures
1128 	 * followed by the corresponding new data areas for them.  Verify
1129 	 * that the sizes in the __kbind structures add up to the total
1130 	 * size and find the start of the new area.
1131 	 */
1132 	paramp = &param.uk[0];
1133 	s = psize;
1134 	for (count = 0; s > 0 && count < KBIND_BLOCK_MAX; count++) {
1135 		if (s < sizeof(*paramp))
1136 			return (EINVAL);
1137 		s -= sizeof(*paramp);
1138 
1139 		baseva = (vaddr_t)paramp[count].kb_addr;
1140 		endva = baseva + paramp[count].kb_size - 1;
1141 		if (paramp[count].kb_addr == NULL ||
1142 		    paramp[count].kb_size == 0 ||
1143 		    paramp[count].kb_size > KBIND_DATA_MAX ||
1144 		    baseva >= VM_MAXUSER_ADDRESS ||
1145 		    endva >= VM_MAXUSER_ADDRESS ||
1146 		    trunc_page(baseva) != trunc_page(endva) ||
1147 		    s < paramp[count].kb_size)
1148 			return (EINVAL);
1149 
1150 		s -= paramp[count].kb_size;
1151 	}
1152 	if (s > 0)
1153 		return (EINVAL);
1154 	data = (const char *)&paramp[count];
1155 
1156 	/* all looks good, so do the bindings */
1157 	last_baseva = VM_MAXUSER_ADDRESS;
1158 	kva = 0;
1159 	TAILQ_INIT(&dead_entries);
1160 	for (i = 0; i < count; i++) {
1161 		baseva = (vaddr_t)paramp[i].kb_addr;
1162 		pageoffset = baseva & PAGE_MASK;
1163 		baseva = trunc_page(baseva);
1164 
1165 		/* make sure sure the desired page is mapped into kernel_map */
1166 		if (baseva != last_baseva) {
1167 			if (kva != 0) {
1168 				vm_map_lock(kernel_map);
1169 				uvm_unmap_remove(kernel_map, kva,
1170 				    kva+PAGE_SIZE, &dead_entries, FALSE, TRUE);
1171 				vm_map_unlock(kernel_map);
1172 				kva = 0;
1173 			}
1174 			if ((error = uvm_map_extract(&p->p_vmspace->vm_map,
1175 			    baseva, PAGE_SIZE, &kva, UVM_EXTRACT_FIXPROT)))
1176 				break;
1177 			last_baseva = baseva;
1178 		}
1179 
1180 		/* do the update */
1181 		if ((error = kcopy(data, (char *)kva + pageoffset,
1182 		    paramp[i].kb_size)))
1183 			break;
1184 		data += paramp[i].kb_size;
1185 	}
1186 
1187 	if (kva != 0) {
1188 		vm_map_lock(kernel_map);
1189 		uvm_unmap_remove(kernel_map, kva, kva+PAGE_SIZE,
1190 		    &dead_entries, FALSE, TRUE);
1191 		vm_map_unlock(kernel_map);
1192 	}
1193 	uvm_unmap_detach(&dead_entries, AMAP_REFALL);
1194 
1195 	return (error);
1196 }
1197