1 /* $NetBSD: linux_misc.c,v 1.260 2023/07/29 15:04:29 christos Exp $ */ 2 3 /*- 4 * Copyright (c) 1995, 1998, 1999, 2008 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Frank van der Linden and Eric Haszlakiewicz; by Jason R. Thorpe 9 * of the Numerical Aerospace Simulation Facility, NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Linux compatibility module. Try to deal with various Linux system calls. 35 */ 36 37 /* 38 * These functions have been moved to multiarch to allow 39 * selection of which machines include them to be 40 * determined by the individual files.linux_<arch> files. 
41 * 42 * Function in multiarch: 43 * linux_sys_break : linux_break.c 44 * linux_sys_alarm : linux_misc_notalpha.c 45 * linux_sys_getresgid : linux_misc_notalpha.c 46 * linux_sys_nice : linux_misc_notalpha.c 47 * linux_sys_readdir : linux_misc_notalpha.c 48 * linux_sys_setresgid : linux_misc_notalpha.c 49 * linux_sys_time : linux_misc_notalpha.c 50 * linux_sys_utime : linux_misc_notalpha.c 51 * linux_sys_waitpid : linux_misc_notalpha.c 52 * linux_sys_old_mmap : linux_oldmmap.c 53 * linux_sys_oldolduname : linux_oldolduname.c 54 * linux_sys_oldselect : linux_oldselect.c 55 * linux_sys_olduname : linux_olduname.c 56 * linux_sys_pipe : linux_pipe.c 57 */ 58 59 #include <sys/cdefs.h> 60 __KERNEL_RCSID(0, "$NetBSD: linux_misc.c,v 1.260 2023/07/29 15:04:29 christos Exp $"); 61 62 #include <sys/param.h> 63 #include <sys/systm.h> 64 #include <sys/namei.h> 65 #include <sys/proc.h> 66 #include <sys/dirent.h> 67 #include <sys/epoll.h> 68 #include <sys/eventfd.h> 69 #include <sys/file.h> 70 #include <sys/stat.h> 71 #include <sys/filedesc.h> 72 #include <sys/ioctl.h> 73 #include <sys/kernel.h> 74 #include <sys/malloc.h> 75 #include <sys/mbuf.h> 76 #include <sys/mman.h> 77 #include <sys/mount.h> 78 #include <sys/poll.h> 79 #include <sys/prot.h> 80 #include <sys/reboot.h> 81 #include <sys/resource.h> 82 #include <sys/resourcevar.h> 83 #include <sys/select.h> 84 #include <sys/signal.h> 85 #include <sys/signalvar.h> 86 #include <sys/socket.h> 87 #include <sys/time.h> 88 #include <sys/times.h> 89 #include <sys/vnode.h> 90 #include <sys/uio.h> 91 #include <sys/wait.h> 92 #include <sys/utsname.h> 93 #include <sys/unistd.h> 94 #include <sys/vfs_syscalls.h> 95 #include <sys/swap.h> /* for SWAP_ON */ 96 #include <sys/sysctl.h> /* for KERN_DOMAINNAME */ 97 #include <sys/kauth.h> 98 #include <sys/futex.h> 99 100 #include <sys/ptrace.h> 101 #include <machine/ptrace.h> 102 103 #include <sys/syscall.h> 104 #include <sys/syscallargs.h> 105 106 #include <compat/sys/resource.h> 107 108 #include <compat/linux/common/linux_machdep.h> 109 #include <compat/linux/common/linux_types.h> 110 #include <compat/linux/common/linux_signal.h> 111 #include <compat/linux/common/linux_ipc.h> 112 #include <compat/linux/common/linux_sem.h> 113 114 #include <compat/linux/common/linux_fcntl.h> 115 #include <compat/linux/common/linux_mmap.h> 116 #include <compat/linux/common/linux_dirent.h> 117 #include <compat/linux/common/linux_util.h> 118 #include <compat/linux/common/linux_misc.h> 119 #include <compat/linux/common/linux_statfs.h> 120 #include <compat/linux/common/linux_limit.h> 121 #include <compat/linux/common/linux_ptrace.h> 122 #include <compat/linux/common/linux_reboot.h> 123 #include <compat/linux/common/linux_emuldata.h> 124 #include <compat/linux/common/linux_sched.h> 125 126 #include <compat/linux/linux_syscallargs.h> 127 128 const int linux_ptrace_request_map[] = { 129 LINUX_PTRACE_TRACEME, PT_TRACE_ME, 130 LINUX_PTRACE_PEEKTEXT, PT_READ_I, 131 LINUX_PTRACE_PEEKDATA, PT_READ_D, 132 LINUX_PTRACE_POKETEXT, PT_WRITE_I, 133 LINUX_PTRACE_POKEDATA, PT_WRITE_D, 134 LINUX_PTRACE_CONT, PT_CONTINUE, 135 LINUX_PTRACE_KILL, PT_KILL, 136 LINUX_PTRACE_ATTACH, PT_ATTACH, 137 LINUX_PTRACE_DETACH, PT_DETACH, 138 # ifdef PT_STEP 139 LINUX_PTRACE_SINGLESTEP, PT_STEP, 140 # endif 141 LINUX_PTRACE_SYSCALL, PT_SYSCALL, 142 -1 143 }; 144 145 const struct linux_mnttypes linux_fstypes[] = { 146 { MOUNT_FFS, LINUX_DEFAULT_SUPER_MAGIC }, 147 { MOUNT_NFS, LINUX_NFS_SUPER_MAGIC }, 148 { MOUNT_MFS, LINUX_DEFAULT_SUPER_MAGIC }, 149 { MOUNT_MSDOS, 
LINUX_MSDOS_SUPER_MAGIC }, 150 { MOUNT_LFS, LINUX_DEFAULT_SUPER_MAGIC }, 151 { MOUNT_FDESC, LINUX_DEFAULT_SUPER_MAGIC }, 152 { MOUNT_NULL, LINUX_DEFAULT_SUPER_MAGIC }, 153 { MOUNT_OVERLAY, LINUX_DEFAULT_SUPER_MAGIC }, 154 { MOUNT_UMAP, LINUX_DEFAULT_SUPER_MAGIC }, 155 { MOUNT_KERNFS, LINUX_DEFAULT_SUPER_MAGIC }, 156 { MOUNT_PROCFS, LINUX_PROC_SUPER_MAGIC }, 157 { MOUNT_AFS, LINUX_DEFAULT_SUPER_MAGIC }, 158 { MOUNT_CD9660, LINUX_ISOFS_SUPER_MAGIC }, 159 { MOUNT_UNION, LINUX_DEFAULT_SUPER_MAGIC }, 160 { MOUNT_ADOSFS, LINUX_ADFS_SUPER_MAGIC }, 161 { MOUNT_EXT2FS, LINUX_EXT2_SUPER_MAGIC }, 162 { MOUNT_CFS, LINUX_DEFAULT_SUPER_MAGIC }, 163 { MOUNT_CODA, LINUX_CODA_SUPER_MAGIC }, 164 { MOUNT_FILECORE, LINUX_DEFAULT_SUPER_MAGIC }, 165 { MOUNT_NTFS, LINUX_DEFAULT_SUPER_MAGIC }, 166 { MOUNT_SMBFS, LINUX_SMB_SUPER_MAGIC }, 167 { MOUNT_PTYFS, LINUX_DEVPTS_SUPER_MAGIC }, 168 { MOUNT_TMPFS, LINUX_TMPFS_SUPER_MAGIC } 169 }; 170 const int linux_fstypes_cnt = sizeof(linux_fstypes) / sizeof(linux_fstypes[0]); 171 172 # ifdef DEBUG_LINUX 173 #define DPRINTF(a) uprintf a 174 # else 175 #define DPRINTF(a) 176 # endif 177 178 /* Local linux_misc.c functions: */ 179 static void linux_to_bsd_mmap_args(struct sys_mmap_args *, 180 const struct linux_sys_mmap_args *); 181 static int linux_mmap(struct lwp *, const struct linux_sys_mmap_args *, 182 register_t *, off_t); 183 184 185 /* 186 * The information on a terminated (or stopped) process needs 187 * to be converted in order for Linux binaries to get a valid signal 188 * number out of it. 189 */ 190 int 191 bsd_to_linux_wstat(int st) 192 { 193 194 int sig; 195 196 if (WIFSIGNALED(st)) { 197 sig = WTERMSIG(st); 198 if (sig >= 0 && sig < NSIG) 199 st= (st & ~0177) | native_to_linux_signo[sig]; 200 } else if (WIFSTOPPED(st)) { 201 sig = WSTOPSIG(st); 202 if (sig >= 0 && sig < NSIG) 203 st = (st & ~0xff00) | 204 (native_to_linux_signo[sig] << 8); 205 } 206 return st; 207 } 208 209 /* 210 * wait4(2). Passed on to the NetBSD call, surrounded by code to 211 * reserve some space for a NetBSD-style wait status, and converting 212 * it to what Linux wants. 213 */ 214 int 215 linux_sys_wait4(struct lwp *l, const struct linux_sys_wait4_args *uap, register_t *retval) 216 { 217 /* { 218 syscallarg(int) pid; 219 syscallarg(int *) status; 220 syscallarg(int) options; 221 syscallarg(struct rusage50 *) rusage; 222 } */ 223 int error, status, options, linux_options, pid = SCARG(uap, pid); 224 struct rusage50 ru50; 225 struct rusage ru; 226 proc_t *p; 227 228 linux_options = SCARG(uap, options); 229 if (linux_options & ~(LINUX_WAIT4_KNOWNFLAGS)) 230 return (EINVAL); 231 232 options = 0; 233 if (linux_options & LINUX_WAIT4_WNOHANG) 234 options |= WNOHANG; 235 if (linux_options & LINUX_WAIT4_WUNTRACED) 236 options |= WUNTRACED; 237 if (linux_options & LINUX_WAIT4_WCONTINUED) 238 options |= WCONTINUED; 239 if (linux_options & LINUX_WAIT4_WALL) 240 options |= WALLSIG; 241 if (linux_options & LINUX_WAIT4_WCLONE) 242 options |= WALTSIG; 243 # ifdef DIAGNOSTIC 244 if (linux_options & LINUX_WAIT4_WNOTHREAD) 245 printf("WARNING: %s: linux process %d.%d called " 246 "waitpid with __WNOTHREAD set!\n", 247 __FILE__, l->l_proc->p_pid, l->l_lid); 248 249 # endif 250 251 error = do_sys_wait(&pid, &status, options, 252 SCARG(uap, rusage) != NULL ? 
	    &ru : NULL);

	retval[0] = pid;
	if (pid == 0)
		return error;

	p = curproc;
	mutex_enter(p->p_lock);
	sigdelset(&p->p_sigpend.sp_set, SIGCHLD); /* XXXAD ksiginfo leak */
	mutex_exit(p->p_lock);

	if (SCARG(uap, rusage) != NULL) {
		rusage_to_rusage50(&ru, &ru50);
		error = copyout(&ru50, SCARG(uap, rusage), sizeof(ru50));
	}

	if (error == 0 && SCARG(uap, status) != NULL) {
		status = bsd_to_linux_wstat(status);
		error = copyout(&status, SCARG(uap, status), sizeof status);
	}

	return error;
}

/*
 * Linux brk(2). Like native, but always return the new break value.
 */
int
linux_sys_brk(struct lwp *l, const struct linux_sys_brk_args *uap, register_t *retval)
{
	/* {
		syscallarg(char *) nsize;
	} */
	struct proc *p = l->l_proc;
	struct vmspace *vm = p->p_vmspace;
	struct sys_obreak_args oba;

	SCARG(&oba, nsize) = SCARG(uap, nsize);

	(void) sys_obreak(l, &oba, retval);
	retval[0] = (register_t)((char *)vm->vm_daddr + ptoa(vm->vm_dsize));
	return 0;
}

/*
 * Implement the fs stat functions. Straightforward.
 */
int
linux_sys_statfs(struct lwp *l, const struct linux_sys_statfs_args *uap, register_t *retval)
{
	/* {
		syscallarg(const char *) path;
		syscallarg(struct linux_statfs *) sp;
	} */
	struct statvfs *sb;
	struct linux_statfs ltmp;
	int error;

	sb = STATVFSBUF_GET();
	error = do_sys_pstatvfs(l, SCARG(uap, path), ST_WAIT, sb);
	if (error == 0) {
		bsd_to_linux_statfs(sb, &ltmp);
		error = copyout(&ltmp, SCARG(uap, sp), sizeof ltmp);
	}
	STATVFSBUF_PUT(sb);

	return error;
}

int
linux_sys_fstatfs(struct lwp *l, const struct linux_sys_fstatfs_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(struct linux_statfs *) sp;
	} */
	struct statvfs *sb;
	struct linux_statfs ltmp;
	int error;

	sb = STATVFSBUF_GET();
	error = do_sys_fstatvfs(l, SCARG(uap, fd), ST_WAIT, sb);
	if (error == 0) {
		bsd_to_linux_statfs(sb, &ltmp);
		error = copyout(&ltmp, SCARG(uap, sp), sizeof ltmp);
	}
	STATVFSBUF_PUT(sb);

	return error;
}

/*
 * uname(). Just copy the info from the various strings stored in the
 * kernel, and put it in the Linux utsname structure. That structure
 * is almost the same as the NetBSD one, only it has fields 65 characters
 * long, and an extra domainname field.
 */
int
linux_sys_uname(struct lwp *l, const struct linux_sys_uname_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct linux_utsname *) up;
	} */
	struct linux_utsname luts;

	memset(&luts, 0, sizeof(luts));
	strlcpy(luts.l_sysname, linux_sysname, sizeof(luts.l_sysname));
	strlcpy(luts.l_nodename, hostname, sizeof(luts.l_nodename));
	strlcpy(luts.l_release, linux_release, sizeof(luts.l_release));
	strlcpy(luts.l_version, linux_version, sizeof(luts.l_version));
	strlcpy(luts.l_machine, LINUX_UNAME_ARCH, sizeof(luts.l_machine));
	strlcpy(luts.l_domainname, domainname, sizeof(luts.l_domainname));

	return copyout(&luts, SCARG(uap, up), sizeof(luts));
}

/* Used directly on: alpha, mips, ppc, sparc, sparc64 */
/* Used indirectly on: arm, i386, m68k */

/*
 * New type Linux mmap call.
 * Only called directly on machines with >= 6 free regs.
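 *
 * Machines that cannot pass all six arguments in registers typically go
 * through linux_sys_old_mmap() instead (see linux_oldmmap.c in the list
 * at the top of this file), which takes a pointer to a block of
 * arguments.  As a rough sketch, a hypothetical Linux-side call such as
 *
 *	mmap(NULL, 8192, PROT_READ|PROT_WRITE,
 *	    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
 *
 * arrives here with a byte offset, which is checked for page alignment
 * below before being handed to linux_mmap().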
374 */ 375 int 376 linux_sys_mmap(struct lwp *l, const struct linux_sys_mmap_args *uap, register_t *retval) 377 { 378 /* { 379 syscallarg(unsigned long) addr; 380 syscallarg(size_t) len; 381 syscallarg(int) prot; 382 syscallarg(int) flags; 383 syscallarg(int) fd; 384 syscallarg(linux_off_t) offset; 385 } */ 386 387 if (SCARG(uap, offset) & PAGE_MASK) 388 return EINVAL; 389 390 return linux_mmap(l, uap, retval, SCARG(uap, offset)); 391 } 392 393 /* 394 * Guts of most architectures' mmap64() implementations. This shares 395 * its list of arguments with linux_sys_mmap(). 396 * 397 * The difference in linux_sys_mmap2() is that "offset" is actually 398 * (offset / pagesize), not an absolute byte count. This translation 399 * to pagesize offsets is done inside glibc between the mmap64() call 400 * point, and the actual syscall. 401 */ 402 int 403 linux_sys_mmap2(struct lwp *l, const struct linux_sys_mmap2_args *uap, register_t *retval) 404 { 405 /* { 406 syscallarg(unsigned long) addr; 407 syscallarg(size_t) len; 408 syscallarg(int) prot; 409 syscallarg(int) flags; 410 syscallarg(int) fd; 411 syscallarg(linux_off_t) offset; 412 } */ 413 414 return linux_mmap(l, uap, retval, 415 ((off_t)SCARG(uap, offset)) << PAGE_SHIFT); 416 } 417 418 /* 419 * Massage arguments and call system mmap(2). 420 */ 421 static int 422 linux_mmap(struct lwp *l, const struct linux_sys_mmap_args *uap, register_t *retval, off_t offset) 423 { 424 struct sys_mmap_args cma; 425 int error; 426 size_t mmoff=0; 427 428 linux_to_bsd_mmap_args(&cma, uap); 429 SCARG(&cma, pos) = offset; 430 431 if (SCARG(uap, flags) & LINUX_MAP_GROWSDOWN) { 432 /* 433 * Request for stack-like memory segment. On linux, this 434 * works by mmap()ping (small) segment, which is automatically 435 * extended when page fault happens below the currently 436 * allocated area. We emulate this by allocating (typically 437 * bigger) segment sized at current stack size limit, and 438 * offsetting the requested and returned address accordingly. 439 * Since physical pages are only allocated on-demand, this 440 * is effectively identical. 441 */ 442 rlim_t ssl = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur; 443 444 if (SCARG(&cma, len) < ssl) { 445 /* Compute the address offset */ 446 mmoff = round_page(ssl) - SCARG(uap, len); 447 448 if (SCARG(&cma, addr)) 449 SCARG(&cma, addr) = (char *)SCARG(&cma, addr) - mmoff; 450 451 SCARG(&cma, len) = (size_t) ssl; 452 } 453 } 454 455 error = sys_mmap(l, &cma, retval); 456 if (error) 457 return (error); 458 459 /* Shift the returned address for stack-like segment if necessary */ 460 retval[0] += mmoff; 461 462 return (0); 463 } 464 465 static void 466 linux_to_bsd_mmap_args(struct sys_mmap_args *cma, const struct linux_sys_mmap_args *uap) 467 { 468 int flags = MAP_TRYFIXED, fl = SCARG(uap, flags); 469 470 flags |= cvtto_bsd_mask(fl, LINUX_MAP_SHARED, MAP_SHARED); 471 flags |= cvtto_bsd_mask(fl, LINUX_MAP_PRIVATE, MAP_PRIVATE); 472 flags |= cvtto_bsd_mask(fl, LINUX_MAP_FIXED, MAP_FIXED); 473 flags |= cvtto_bsd_mask(fl, LINUX_MAP_ANON, MAP_ANON); 474 flags |= cvtto_bsd_mask(fl, LINUX_MAP_LOCKED, MAP_WIRED); 475 /* XXX XAX ERH: Any other flags here? There are more defined... */ 476 477 SCARG(cma, addr) = (void *)SCARG(uap, addr); 478 SCARG(cma, len) = SCARG(uap, len); 479 SCARG(cma, prot) = SCARG(uap, prot); 480 if (SCARG(cma, prot) & VM_PROT_WRITE) /* XXX */ 481 SCARG(cma, prot) |= VM_PROT_READ; 482 SCARG(cma, flags) = flags; 483 SCARG(cma, fd) = flags & MAP_ANON ? 
-1 : SCARG(uap, fd); 484 SCARG(cma, PAD) = 0; 485 } 486 487 #define LINUX_MREMAP_MAYMOVE 1 488 #define LINUX_MREMAP_FIXED 2 489 490 int 491 linux_sys_mremap(struct lwp *l, const struct linux_sys_mremap_args *uap, register_t *retval) 492 { 493 /* { 494 syscallarg(void *) old_address; 495 syscallarg(size_t) old_size; 496 syscallarg(size_t) new_size; 497 syscallarg(u_long) flags; 498 } */ 499 500 struct proc *p; 501 struct vm_map *map; 502 vaddr_t oldva; 503 vaddr_t newva; 504 size_t oldsize; 505 size_t newsize; 506 int flags; 507 int uvmflags; 508 int error; 509 510 flags = SCARG(uap, flags); 511 oldva = (vaddr_t)SCARG(uap, old_address); 512 oldsize = round_page(SCARG(uap, old_size)); 513 newsize = round_page(SCARG(uap, new_size)); 514 if ((flags & ~(LINUX_MREMAP_FIXED|LINUX_MREMAP_MAYMOVE)) != 0) { 515 error = EINVAL; 516 goto done; 517 } 518 if ((flags & LINUX_MREMAP_FIXED) != 0) { 519 if ((flags & LINUX_MREMAP_MAYMOVE) == 0) { 520 error = EINVAL; 521 goto done; 522 } 523 #if 0 /* notyet */ 524 newva = SCARG(uap, new_address); 525 uvmflags = MAP_FIXED; 526 #else /* notyet */ 527 error = EOPNOTSUPP; 528 goto done; 529 #endif /* notyet */ 530 } else if ((flags & LINUX_MREMAP_MAYMOVE) != 0) { 531 uvmflags = 0; 532 } else { 533 newva = oldva; 534 uvmflags = MAP_FIXED; 535 } 536 p = l->l_proc; 537 map = &p->p_vmspace->vm_map; 538 error = uvm_mremap(map, oldva, oldsize, map, &newva, newsize, p, 539 uvmflags); 540 541 done: 542 *retval = (error != 0) ? 0 : (register_t)newva; 543 return error; 544 } 545 546 #ifdef USRSTACK 547 int 548 linux_sys_mprotect(struct lwp *l, const struct linux_sys_mprotect_args *uap, register_t *retval) 549 { 550 /* { 551 syscallarg(const void *) start; 552 syscallarg(unsigned long) len; 553 syscallarg(int) prot; 554 } */ 555 struct vm_map_entry *entry; 556 struct vm_map *map; 557 struct proc *p; 558 vaddr_t end, start, len, stacklim; 559 int prot, grows; 560 561 start = (vaddr_t)SCARG(uap, start); 562 len = round_page(SCARG(uap, len)); 563 prot = SCARG(uap, prot); 564 grows = prot & (LINUX_PROT_GROWSDOWN | LINUX_PROT_GROWSUP); 565 prot &= ~grows; 566 end = start + len; 567 568 if (start & PAGE_MASK) 569 return EINVAL; 570 if (end < start) 571 return EINVAL; 572 if (end == start) 573 return 0; 574 575 if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) 576 return EINVAL; 577 if (grows == (LINUX_PROT_GROWSDOWN | LINUX_PROT_GROWSUP)) 578 return EINVAL; 579 580 p = l->l_proc; 581 map = &p->p_vmspace->vm_map; 582 vm_map_lock(map); 583 # ifdef notdef 584 VM_MAP_RANGE_CHECK(map, start, end); 585 # endif 586 if (!uvm_map_lookup_entry(map, start, &entry) || entry->start > start) { 587 vm_map_unlock(map); 588 return ENOMEM; 589 } 590 591 /* 592 * Approximate the behaviour of PROT_GROWS{DOWN,UP}. 
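	 *
	 * On Linux the protection change is extended over the whole
	 * growable region the address belongs to; e.g. (illustrative
	 * call only)
	 *
	 *	mprotect(stack_page, PAGE_SIZE, PROT_READ | PROT_GROWSDOWN)
	 *
	 * affects everything from the bottom of the stack up to the end
	 * of the given range.  Below we approximate that by widening the
	 * range to the current stack limit when the address lies within
	 * the stack, and to the containing map entry otherwise.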
	 */

	stacklim = (vaddr_t)p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur;
	if (grows & LINUX_PROT_GROWSDOWN) {
		if (USRSTACK - stacklim <= start && start < USRSTACK) {
			start = USRSTACK - stacklim;
		} else {
			start = entry->start;
		}
	} else if (grows & LINUX_PROT_GROWSUP) {
		if (USRSTACK <= end && end < USRSTACK + stacklim) {
			end = USRSTACK + stacklim;
		} else {
			end = entry->end;
		}
	}
	vm_map_unlock(map);
	return uvm_map_protect_user(l, start, end, prot);
}
#endif /* USRSTACK */

/*
 * This code is partly stolen from src/lib/libc/compat-43/times.c
 */

#define	CONVTCK(r)	(r.tv_sec * hz + r.tv_usec / (1000000 / hz))

int
linux_sys_times(struct lwp *l, const struct linux_sys_times_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct times *) tms;
	} */
	struct proc *p = l->l_proc;
	struct timeval t;
	int error;

	if (SCARG(uap, tms)) {
		struct linux_tms ltms;
		struct rusage ru;

		memset(&ltms, 0, sizeof(ltms));

		mutex_enter(p->p_lock);
		calcru(p, &ru.ru_utime, &ru.ru_stime, NULL, NULL);
		ltms.ltms_utime = CONVTCK(ru.ru_utime);
		ltms.ltms_stime = CONVTCK(ru.ru_stime);
		ltms.ltms_cutime = CONVTCK(p->p_stats->p_cru.ru_utime);
		ltms.ltms_cstime = CONVTCK(p->p_stats->p_cru.ru_stime);
		mutex_exit(p->p_lock);

		if ((error = copyout(&ltms, SCARG(uap, tms), sizeof ltms)))
			return error;
	}

	getmicrouptime(&t);

	retval[0] = ((linux_clock_t)(CONVTCK(t)));
	return 0;
}

#undef CONVTCK

#if !defined(__aarch64__)
/*
 * Linux 'readdir' call. This code is mostly taken from the
 * SunOS getdents call (see compat/sunos/sunos_misc.c), though
 * an attempt has been made to keep it a little cleaner (failing
 * miserably, because of the cruft needed if count 1 is passed).
 *
 * The d_off field should contain the offset of the next valid entry,
 * but in Linux it has the offset of the entry itself. We emulate
 * that bug here.
 *
 * Read in BSD-style entries, convert them, and copy them out.
 *
 * Note that this doesn't handle union-mounted filesystems.
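 *
 * For reference, the record assembled in 'idb' below has roughly the
 * classic Linux getdents(2) layout (see linux_dirent.h for the real
 * definition; this is only a sketch):
 *
 *	d_ino     inode number
 *	d_off     offset of this entry (the emulated bug above)
 *	d_reclen  total record length, LINUX_RECLEN(&idb, namelen)
 *	d_name    NUL-terminated name
 *	d_type    stored in the very last byte of the record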
670 */ 671 int 672 linux_sys_getdents(struct lwp *l, const struct linux_sys_getdents_args *uap, register_t *retval) 673 { 674 /* { 675 syscallarg(int) fd; 676 syscallarg(struct linux_dirent *) dent; 677 syscallarg(unsigned int) count; 678 } */ 679 struct dirent *bdp; 680 struct vnode *vp; 681 char *inp, *tbuf; /* BSD-format */ 682 int len, reclen; /* BSD-format */ 683 char *outp; /* Linux-format */ 684 int resid, linux_reclen = 0; /* Linux-format */ 685 struct file *fp; 686 struct uio auio; 687 struct iovec aiov; 688 struct linux_dirent idb; 689 off_t off; /* true file offset */ 690 int buflen, error, eofflag, nbytes, oldcall; 691 struct vattr va; 692 off_t *cookiebuf = NULL, *cookie; 693 int ncookies; 694 695 /* fd_getvnode() will use the descriptor for us */ 696 if ((error = fd_getvnode(SCARG(uap, fd), &fp)) != 0) 697 return (error); 698 699 if ((fp->f_flag & FREAD) == 0) { 700 error = EBADF; 701 goto out1; 702 } 703 704 vp = (struct vnode *)fp->f_data; 705 if (vp->v_type != VDIR) { 706 error = ENOTDIR; 707 goto out1; 708 } 709 710 vn_lock(vp, LK_SHARED | LK_RETRY); 711 error = VOP_GETATTR(vp, &va, l->l_cred); 712 VOP_UNLOCK(vp); 713 if (error) 714 goto out1; 715 716 nbytes = SCARG(uap, count); 717 if (nbytes == 1) { /* emulating old, broken behaviour */ 718 nbytes = sizeof (idb); 719 buflen = uimax(va.va_blocksize, nbytes); 720 oldcall = 1; 721 } else { 722 buflen = uimin(MAXBSIZE, nbytes); 723 if (buflen < va.va_blocksize) 724 buflen = va.va_blocksize; 725 oldcall = 0; 726 } 727 tbuf = malloc(buflen, M_TEMP, M_WAITOK); 728 729 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 730 off = fp->f_offset; 731 again: 732 aiov.iov_base = tbuf; 733 aiov.iov_len = buflen; 734 auio.uio_iov = &aiov; 735 auio.uio_iovcnt = 1; 736 auio.uio_rw = UIO_READ; 737 auio.uio_resid = buflen; 738 auio.uio_offset = off; 739 UIO_SETUP_SYSSPACE(&auio); 740 /* 741 * First we read into the malloc'ed buffer, then 742 * we massage it into user space, one record at a time. 743 */ 744 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &cookiebuf, 745 &ncookies); 746 if (error) 747 goto out; 748 749 inp = tbuf; 750 outp = (void *)SCARG(uap, dent); 751 resid = nbytes; 752 if ((len = buflen - auio.uio_resid) == 0) 753 goto eof; 754 755 for (cookie = cookiebuf; len > 0; len -= reclen) { 756 bdp = (struct dirent *)inp; 757 reclen = bdp->d_reclen; 758 if (reclen & 3) { 759 error = EIO; 760 goto out; 761 } 762 if (bdp->d_fileno == 0) { 763 inp += reclen; /* it is a hole; squish it out */ 764 if (cookie) 765 off = *cookie++; 766 else 767 off += reclen; 768 continue; 769 } 770 linux_reclen = LINUX_RECLEN(&idb, bdp->d_namlen); 771 if (reclen > len || resid < linux_reclen) { 772 /* entry too big for buffer, so just stop */ 773 outp++; 774 break; 775 } 776 /* 777 * Massage in place to make a Linux-shaped dirent (otherwise 778 * we have to worry about touching user memory outside of 779 * the copyout() call). 780 */ 781 memset(&idb, 0, sizeof(idb)); 782 idb.d_ino = bdp->d_fileno; 783 /* 784 * The old readdir() call misuses the offset and reclen fields. 
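		 * For the count==1 compatibility case we therefore hand
		 * back a single record with d_off set to the record
		 * length and d_reclen set to the name length, which is
		 * apparently what the old Linux readdir() interface
		 * expected.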
		 */
		if (oldcall) {
			idb.d_off = (linux_off_t)linux_reclen;
			idb.d_reclen = (u_short)bdp->d_namlen;
		} else {
			if (sizeof (idb.d_off) <= 4 && (off >> 32) != 0) {
				compat_offseterr(vp, "linux_getdents");
				error = EINVAL;
				goto out;
			}
			idb.d_off = (linux_off_t)off;
			idb.d_reclen = (u_short)linux_reclen;
			/* Linux puts d_type at the end of each record */
			*((char *)&idb + idb.d_reclen - 1) = bdp->d_type;
		}
		memcpy(idb.d_name, bdp->d_name,
		    MIN(sizeof(idb.d_name), bdp->d_namlen + 1));
		if ((error = copyout((void *)&idb, outp, linux_reclen)))
			goto out;
		/* advance past this real entry */
		inp += reclen;
		if (cookie)
			off = *cookie++; /* each entry points to itself */
		else
			off += reclen;
		/* advance output past Linux-shaped entry */
		outp += linux_reclen;
		resid -= linux_reclen;
		if (oldcall)
			break;
	}

	/* if we squished out the whole block, try again */
	if (outp == (void *)SCARG(uap, dent)) {
		if (cookiebuf)
			free(cookiebuf, M_TEMP);
		cookiebuf = NULL;
		goto again;
	}
	fp->f_offset = off;	/* update the vnode offset */

	if (oldcall)
		nbytes = resid + linux_reclen;

eof:
	*retval = nbytes - resid;
out:
	VOP_UNLOCK(vp);
	if (cookiebuf)
		free(cookiebuf, M_TEMP);
	free(tbuf, M_TEMP);
out1:
	fd_putfile(SCARG(uap, fd));
	return error;
}
#endif

#if !defined(__aarch64__)
/*
 * Even when just using registers to pass arguments to syscalls you can
 * have 5 of them on the i386, so this newer version of select() passes
 * its five arguments directly instead of through a pointer to an
 * argument block the way the old select() did.
 */
int
linux_sys_select(struct lwp *l, const struct linux_sys_select_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) nfds;
		syscallarg(fd_set *) readfds;
		syscallarg(fd_set *) writefds;
		syscallarg(fd_set *) exceptfds;
		syscallarg(struct timeval50 *) timeout;
	} */

	return linux_select1(l, retval, SCARG(uap, nfds), SCARG(uap, readfds),
	    SCARG(uap, writefds), SCARG(uap, exceptfds),
	    (struct linux_timeval *)SCARG(uap, timeout));
}

/*
 * Common code for the old and new versions of select(). A couple of
 * things are important:
 * 1) return the amount of time left in the 'timeout' parameter
 * 2) select never returns ERESTART on Linux, always return EINTR
 */
int
linux_select1(struct lwp *l, register_t *retval, int nfds, fd_set *readfds,
    fd_set *writefds, fd_set *exceptfds, struct linux_timeval *timeout)
{
	struct timespec ts0, ts1, uts, *ts = NULL;
	struct linux_timeval ltv;
	int error;

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (timeout) {
		if ((error = copyin(timeout, &ltv, sizeof(ltv))))
			return error;
		uts.tv_sec = ltv.tv_sec;
		uts.tv_nsec = (long)((unsigned long)ltv.tv_usec * 1000);
		if (itimespecfix(&uts)) {
			/*
			 * The timeval was invalid. Convert it to something
			 * valid that will act as it does under Linux.
			 */
			uts.tv_sec += uts.tv_nsec / 1000000000;
			uts.tv_nsec %= 1000000000;
			if (uts.tv_nsec < 0) {
				uts.tv_sec -= 1;
				uts.tv_nsec += 1000000000;
			}
			if (uts.tv_sec < 0)
				timespecclear(&uts);
		}
		ts = &uts;
		nanotime(&ts0);
	}

	error = selcommon(retval, nfds, readfds, writefds, exceptfds, ts, NULL);

	if (error) {
		/*
		 * See fs/select.c in the Linux kernel. Without this,
		 * Maelstrom doesn't work.
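		 * (Linux never restarts select(2) after a signal, not
		 * even for SA_RESTART handlers -- see signal(7) -- so
		 * emulated binaries must see EINTR here, never a
		 * transparent restart.)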
		 */
		if (error == ERESTART)
			error = EINTR;
		return error;
	}

	if (timeout) {
		if (*retval) {
			/*
			 * Compute how much time was left of the timeout,
			 * by taking the difference between the current
			 * time and the time we started the call, and
			 * subtracting that from the user-supplied value.
			 */
			nanotime(&ts1);
			timespecsub(&ts1, &ts0, &ts1);
			timespecsub(&uts, &ts1, &uts);
			if (uts.tv_sec < 0)
				timespecclear(&uts);
		} else
			timespecclear(&uts);
		ltv.tv_sec = uts.tv_sec;
		ltv.tv_usec = uts.tv_nsec / 1000;
		if ((error = copyout(&ltv, timeout, sizeof(ltv))))
			return error;
	}

	return 0;
}
#endif

/*
 * Derived from FreeBSD's sys/compat/linux/linux_misc.c:linux_pselect6()
 * which was contributed by Dmitry Chagin
 * https://svnweb.freebsd.org/base?view=revision&revision=283403
 */
int
linux_sys_pselect6(struct lwp *l,
    const struct linux_sys_pselect6_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) nfds;
		syscallarg(fd_set *) readfds;
		syscallarg(fd_set *) writefds;
		syscallarg(fd_set *) exceptfds;
		syscallarg(struct timespec *) timeout;
		syscallarg(linux_sized_sigset_t *) ss;
	} */
	struct timespec uts, ts0, ts1, *tsp;
	linux_sized_sigset_t lsss;
	struct linux_timespec lts;
	linux_sigset_t lss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	ssp = NULL;
	if (SCARG(uap, ss) != NULL) {
		if ((error = copyin(SCARG(uap, ss), &lsss, sizeof(lsss))) != 0)
			return (error);
		if (lsss.ss_len != sizeof(lss))
			return (EINVAL);
		if (lsss.ss != NULL) {
			if ((error = copyin(lsss.ss, &lss, sizeof(lss))) != 0)
				return (error);
			linux_to_native_sigset(&ss, &lss);
			ssp = &ss;
		}
	}

	if (SCARG(uap, timeout) != NULL) {
		error = copyin(SCARG(uap, timeout), &lts, sizeof(lts));
		if (error != 0)
			return (error);
		linux_to_native_timespec(&uts, &lts);

		if (itimespecfix(&uts))
			return (EINVAL);

		nanotime(&ts0);
		tsp = &uts;
	} else {
		tsp = NULL;
	}

	error = selcommon(retval, SCARG(uap, nfds), SCARG(uap, readfds),
	    SCARG(uap, writefds), SCARG(uap, exceptfds), tsp, ssp);

	if (error == 0 && tsp != NULL) {
		if (*retval != 0) {
			/*
			 * Compute how much time was left of the timeout,
			 * by taking the difference between the current
			 * time and the time we started the call, and
			 * subtracting that from the user-supplied value.
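			 * I.e. (sketch): uts -= (ts1 - ts0), clamped at
			 * zero, after which the result is converted back
			 * to a Linux timespec and copied out.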
			 */
			nanotime(&ts1);
			timespecsub(&ts1, &ts0, &ts1);
			timespecsub(&uts, &ts1, &uts);
			if (uts.tv_sec < 0)
				timespecclear(&uts);
		} else {
			timespecclear(&uts);
		}

		native_to_linux_timespec(&lts, &uts);
		error = copyout(&lts, SCARG(uap, timeout), sizeof(lts));
	}

	return (error);
}

int
linux_sys_ppoll(struct lwp *l,
    const struct linux_sys_ppoll_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct pollfd *) fds;
		syscallarg(u_int) nfds;
		syscallarg(struct linux_timespec *) timeout;
		syscallarg(linux_sigset_t *) sigset;
	} */
	struct linux_timespec lts0, *lts;
	struct timespec ts0, *ts = NULL;
	linux_sigset_t lsigmask0, *lsigmask;
	sigset_t sigmask0, *sigmask = NULL;
	int error;

	lts = SCARG(uap, timeout);
	if (lts) {
		if ((error = copyin(lts, &lts0, sizeof(lts0))) != 0)
			return error;
		linux_to_native_timespec(&ts0, &lts0);
		ts = &ts0;
	}

	lsigmask = SCARG(uap, sigset);
	if (lsigmask) {
		if ((error = copyin(lsigmask, &lsigmask0, sizeof(lsigmask0))))
			return error;
		linux_to_native_sigset(&sigmask0, &lsigmask0);
		sigmask = &sigmask0;
	}

	return pollcommon(retval, SCARG(uap, fds), SCARG(uap, nfds),
	    ts, sigmask);
}

/*
 * Set the 'personality' (emulation mode) for the current process. Only
 * accept the Linux personality here (0). This call is needed because
 * the Linux ELF crt0 issues it in an ugly kludge to make sure that
 * ELF binaries run in Linux mode, not SVR4 mode.
 */
int
linux_sys_personality(struct lwp *l, const struct linux_sys_personality_args *uap, register_t *retval)
{
	/* {
		syscallarg(unsigned long) per;
	} */
	struct linux_emuldata *led;
	int per;

	per = SCARG(uap, per);
	led = l->l_emuldata;
	if (per == LINUX_PER_QUERY) {
		retval[0] = led->led_personality;
		return 0;
	}

	switch (per & LINUX_PER_MASK) {
	case LINUX_PER_LINUX:
	case LINUX_PER_LINUX32:
		led->led_personality = per;
		break;

	default:
		return EINVAL;
	}

	retval[0] = per;
	return 0;
}

/*
 * We have nonexistent fsuid equal to uid.
 * If modification is requested, refuse.
 */
int
linux_sys_setfsuid(struct lwp *l, const struct linux_sys_setfsuid_args *uap, register_t *retval)
{
	/* {
		syscallarg(uid_t) uid;
	} */
	uid_t uid;

	uid = SCARG(uap, uid);
	if (kauth_cred_getuid(l->l_cred) != uid)
		return sys_nosys(l, uap, retval);

	*retval = uid;
	return 0;
}

int
linux_sys_setfsgid(struct lwp *l, const struct linux_sys_setfsgid_args *uap, register_t *retval)
{
	/* {
		syscallarg(gid_t) gid;
	} */
	gid_t gid;

	gid = SCARG(uap, gid);
	if (kauth_cred_getgid(l->l_cred) != gid)
		return sys_nosys(l, uap, retval);

	*retval = gid;
	return 0;
}

int
linux_sys_setresuid(struct lwp *l, const struct linux_sys_setresuid_args *uap, register_t *retval)
{
	/* {
		syscallarg(uid_t) ruid;
		syscallarg(uid_t) euid;
		syscallarg(uid_t) suid;
	} */

	/*
	 * Note: These checks are a little different than the NetBSD
	 * setreuid(2) call performs. This precisely follows the
	 * behavior of the Linux kernel.
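	 *
	 * In particular, an unprivileged process may set each of the
	 * real, effective and saved IDs to the current value of any of
	 * the three, which is why every ID_?_EQ_? combination is passed
	 * to do_setresuid() below.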
1144 */ 1145 1146 return do_setresuid(l, SCARG(uap, ruid), SCARG(uap, euid), 1147 SCARG(uap, suid), 1148 ID_R_EQ_R | ID_R_EQ_E | ID_R_EQ_S | 1149 ID_E_EQ_R | ID_E_EQ_E | ID_E_EQ_S | 1150 ID_S_EQ_R | ID_S_EQ_E | ID_S_EQ_S ); 1151 } 1152 1153 int 1154 linux_sys_getresuid(struct lwp *l, const struct linux_sys_getresuid_args *uap, register_t *retval) 1155 { 1156 /* { 1157 syscallarg(uid_t *) ruid; 1158 syscallarg(uid_t *) euid; 1159 syscallarg(uid_t *) suid; 1160 } */ 1161 kauth_cred_t pc = l->l_cred; 1162 int error; 1163 uid_t uid; 1164 1165 /* 1166 * Linux copies these values out to userspace like so: 1167 * 1168 * 1. Copy out ruid. 1169 * 2. If that succeeds, copy out euid. 1170 * 3. If both of those succeed, copy out suid. 1171 */ 1172 uid = kauth_cred_getuid(pc); 1173 if ((error = copyout(&uid, SCARG(uap, ruid), sizeof(uid_t))) != 0) 1174 return (error); 1175 1176 uid = kauth_cred_geteuid(pc); 1177 if ((error = copyout(&uid, SCARG(uap, euid), sizeof(uid_t))) != 0) 1178 return (error); 1179 1180 uid = kauth_cred_getsvuid(pc); 1181 1182 return (copyout(&uid, SCARG(uap, suid), sizeof(uid_t))); 1183 } 1184 1185 int 1186 linux_sys_ptrace(struct lwp *l, const struct linux_sys_ptrace_args *uap, register_t *retval) 1187 { 1188 /* { 1189 i386, m68k, powerpc: T=int 1190 alpha, amd64: T=long 1191 syscallarg(T) request; 1192 syscallarg(T) pid; 1193 syscallarg(T) addr; 1194 syscallarg(T) data; 1195 } */ 1196 const int *ptr; 1197 int request; 1198 int error; 1199 1200 ptr = linux_ptrace_request_map; 1201 request = SCARG(uap, request); 1202 while (*ptr != -1) 1203 if (*ptr++ == request) { 1204 struct sys_ptrace_args pta; 1205 1206 SCARG(&pta, req) = *ptr; 1207 SCARG(&pta, pid) = SCARG(uap, pid); 1208 SCARG(&pta, addr) = (void *)SCARG(uap, addr); 1209 SCARG(&pta, data) = SCARG(uap, data); 1210 1211 /* 1212 * Linux ptrace(PTRACE_CONT, pid, 0, 0) means actually 1213 * to continue where the process left off previously. 1214 * The same thing is achieved by addr == (void *) 1 1215 * on NetBSD, so rewrite 'addr' appropriately. 
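			 * E.g. (sketch): a Linux ptrace(PTRACE_CONT, pid,
			 * 0, SIGTERM) reaches the native code below as
			 * ptrace(PT_CONTINUE, pid, (void *)1, SIGTERM).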
1216 */ 1217 if (request == LINUX_PTRACE_CONT && SCARG(uap, addr)==0) 1218 SCARG(&pta, addr) = (void *) 1; 1219 1220 error = sysent[SYS_ptrace].sy_call(l, &pta, retval); 1221 if (error) 1222 return error; 1223 switch (request) { 1224 case LINUX_PTRACE_PEEKTEXT: 1225 case LINUX_PTRACE_PEEKDATA: 1226 error = copyout (retval, 1227 (void *)SCARG(uap, data), 1228 sizeof *retval); 1229 *retval = SCARG(uap, data); 1230 break; 1231 default: 1232 break; 1233 } 1234 return error; 1235 } 1236 else 1237 ptr++; 1238 1239 return LINUX_SYS_PTRACE_ARCH(l, uap, retval); 1240 } 1241 1242 int 1243 linux_sys_reboot(struct lwp *l, const struct linux_sys_reboot_args *uap, register_t *retval) 1244 { 1245 /* { 1246 syscallarg(int) magic1; 1247 syscallarg(int) magic2; 1248 syscallarg(int) cmd; 1249 syscallarg(void *) arg; 1250 } */ 1251 struct sys_reboot_args /* { 1252 syscallarg(int) opt; 1253 syscallarg(char *) bootstr; 1254 } */ sra; 1255 int error; 1256 1257 if ((error = kauth_authorize_system(l->l_cred, 1258 KAUTH_SYSTEM_REBOOT, 0, NULL, NULL, NULL)) != 0) 1259 return(error); 1260 1261 if (SCARG(uap, magic1) != LINUX_REBOOT_MAGIC1) 1262 return(EINVAL); 1263 if (SCARG(uap, magic2) != LINUX_REBOOT_MAGIC2 && 1264 SCARG(uap, magic2) != LINUX_REBOOT_MAGIC2A && 1265 SCARG(uap, magic2) != LINUX_REBOOT_MAGIC2B) 1266 return(EINVAL); 1267 1268 switch ((unsigned long)SCARG(uap, cmd)) { 1269 case LINUX_REBOOT_CMD_RESTART: 1270 SCARG(&sra, opt) = RB_AUTOBOOT; 1271 break; 1272 case LINUX_REBOOT_CMD_HALT: 1273 SCARG(&sra, opt) = RB_HALT; 1274 break; 1275 case LINUX_REBOOT_CMD_POWER_OFF: 1276 SCARG(&sra, opt) = RB_HALT|RB_POWERDOWN; 1277 break; 1278 case LINUX_REBOOT_CMD_RESTART2: 1279 /* Reboot with an argument. */ 1280 SCARG(&sra, opt) = RB_AUTOBOOT|RB_STRING; 1281 SCARG(&sra, bootstr) = SCARG(uap, arg); 1282 break; 1283 case LINUX_REBOOT_CMD_CAD_ON: 1284 return(EINVAL); /* We don't implement ctrl-alt-delete */ 1285 case LINUX_REBOOT_CMD_CAD_OFF: 1286 return(0); 1287 default: 1288 return(EINVAL); 1289 } 1290 1291 return(sys_reboot(l, &sra, retval)); 1292 } 1293 1294 /* 1295 * Copy of compat_12_sys_swapon(). 1296 */ 1297 int 1298 linux_sys_swapon(struct lwp *l, const struct linux_sys_swapon_args *uap, register_t *retval) 1299 { 1300 /* { 1301 syscallarg(const char *) name; 1302 } */ 1303 struct sys_swapctl_args ua; 1304 1305 SCARG(&ua, cmd) = SWAP_ON; 1306 SCARG(&ua, arg) = (void *)__UNCONST(SCARG(uap, name)); 1307 SCARG(&ua, misc) = 0; /* priority */ 1308 return (sys_swapctl(l, &ua, retval)); 1309 } 1310 1311 /* 1312 * Stop swapping to the file or block device specified by path. 
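 * Like linux_sys_swapon() above, this is just a thin wrapper around the
 * native swapctl(2); roughly swapctl(SWAP_OFF, path, 0).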
1313 */ 1314 int 1315 linux_sys_swapoff(struct lwp *l, const struct linux_sys_swapoff_args *uap, register_t *retval) 1316 { 1317 /* { 1318 syscallarg(const char *) path; 1319 } */ 1320 struct sys_swapctl_args ua; 1321 1322 SCARG(&ua, cmd) = SWAP_OFF; 1323 SCARG(&ua, arg) = __UNCONST(SCARG(uap, path)); /*XXXUNCONST*/ 1324 return (sys_swapctl(l, &ua, retval)); 1325 } 1326 1327 /* 1328 * Copy of compat_09_sys_setdomainname() 1329 */ 1330 /* ARGSUSED */ 1331 int 1332 linux_sys_setdomainname(struct lwp *l, const struct linux_sys_setdomainname_args *uap, register_t *retval) 1333 { 1334 /* { 1335 syscallarg(char *) domainname; 1336 syscallarg(int) len; 1337 } */ 1338 int name[2]; 1339 1340 name[0] = CTL_KERN; 1341 name[1] = KERN_DOMAINNAME; 1342 return (old_sysctl(&name[0], 2, 0, 0, SCARG(uap, domainname), 1343 SCARG(uap, len), l)); 1344 } 1345 1346 /* 1347 * sysinfo() 1348 */ 1349 /* ARGSUSED */ 1350 int 1351 linux_sys_sysinfo(struct lwp *l, const struct linux_sys_sysinfo_args *uap, register_t *retval) 1352 { 1353 /* { 1354 syscallarg(struct linux_sysinfo *) arg; 1355 } */ 1356 struct linux_sysinfo si; 1357 struct loadavg *la; 1358 int64_t filepg; 1359 1360 memset(&si, 0, sizeof(si)); 1361 si.uptime = time_uptime; 1362 la = &averunnable; 1363 si.loads[0] = la->ldavg[0] * LINUX_SYSINFO_LOADS_SCALE / la->fscale; 1364 si.loads[1] = la->ldavg[1] * LINUX_SYSINFO_LOADS_SCALE / la->fscale; 1365 si.loads[2] = la->ldavg[2] * LINUX_SYSINFO_LOADS_SCALE / la->fscale; 1366 si.totalram = ctob((u_long)physmem); 1367 /* uvm_availmem() may sync the counters. */ 1368 si.freeram = (u_long)uvm_availmem(true) * uvmexp.pagesize; 1369 filepg = cpu_count_get(CPU_COUNT_FILECLEAN) + 1370 cpu_count_get(CPU_COUNT_FILEDIRTY) + 1371 cpu_count_get(CPU_COUNT_FILEUNKNOWN) - 1372 cpu_count_get(CPU_COUNT_EXECPAGES); 1373 si.sharedram = 0; /* XXX */ 1374 si.bufferram = (u_long)(filepg * uvmexp.pagesize); 1375 si.totalswap = (u_long)uvmexp.swpages * uvmexp.pagesize; 1376 si.freeswap = 1377 (u_long)(uvmexp.swpages - uvmexp.swpginuse) * uvmexp.pagesize; 1378 si.procs = atomic_load_relaxed(&nprocs); 1379 1380 /* The following are only present in newer Linux kernels. 
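	 * mem_unit is the unit, in bytes, in which the other sizes are
	 * expressed; everything above is filled in as plain bytes, so it
	 * is simply 1 here.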
*/ 1381 si.totalbig = 0; 1382 si.freebig = 0; 1383 si.mem_unit = 1; 1384 1385 return (copyout(&si, SCARG(uap, arg), sizeof si)); 1386 } 1387 1388 int 1389 linux_sys_getrlimit(struct lwp *l, const struct linux_sys_getrlimit_args *uap, register_t *retval) 1390 { 1391 /* { 1392 syscallarg(int) which; 1393 # ifdef LINUX_LARGEFILE64 1394 syscallarg(struct rlimit *) rlp; 1395 # else 1396 syscallarg(struct orlimit *) rlp; 1397 # endif 1398 } */ 1399 # ifdef LINUX_LARGEFILE64 1400 struct rlimit orl; 1401 # else 1402 struct orlimit orl; 1403 # endif 1404 int which; 1405 1406 which = linux_to_bsd_limit(SCARG(uap, which)); 1407 if (which < 0) 1408 return -which; 1409 1410 memset(&orl, 0, sizeof(orl)); 1411 bsd_to_linux_rlimit(&orl, &l->l_proc->p_rlimit[which]); 1412 1413 return copyout(&orl, SCARG(uap, rlp), sizeof(orl)); 1414 } 1415 1416 int 1417 linux_sys_setrlimit(struct lwp *l, const struct linux_sys_setrlimit_args *uap, register_t *retval) 1418 { 1419 /* { 1420 syscallarg(int) which; 1421 # ifdef LINUX_LARGEFILE64 1422 syscallarg(struct rlimit *) rlp; 1423 # else 1424 syscallarg(struct orlimit *) rlp; 1425 # endif 1426 } */ 1427 struct rlimit rl; 1428 # ifdef LINUX_LARGEFILE64 1429 struct rlimit orl; 1430 # else 1431 struct orlimit orl; 1432 # endif 1433 int error; 1434 int which; 1435 1436 if ((error = copyin(SCARG(uap, rlp), &orl, sizeof(orl))) != 0) 1437 return error; 1438 1439 which = linux_to_bsd_limit(SCARG(uap, which)); 1440 if (which < 0) 1441 return -which; 1442 1443 linux_to_bsd_rlimit(&rl, &orl); 1444 return dosetrlimit(l, l->l_proc, which, &rl); 1445 } 1446 1447 # if !defined(__aarch64__) && !defined(__mips__) && !defined(__amd64__) 1448 /* XXX: this doesn't look 100% common, at least mips doesn't have it */ 1449 int 1450 linux_sys_ugetrlimit(struct lwp *l, const struct linux_sys_ugetrlimit_args *uap, register_t *retval) 1451 { 1452 return linux_sys_getrlimit(l, (const void *)uap, retval); 1453 } 1454 # endif 1455 1456 int 1457 linux_sys_prlimit64(struct lwp *l, const struct linux_sys_prlimit64_args *uap, register_t *retval) 1458 { 1459 /* { 1460 syscallarg(pid_t) pid; 1461 syscallarg(int) witch; 1462 syscallarg(struct rlimit *) new_rlp; 1463 syscallarg(struct rlimit *) old_rlp; 1464 }; */ 1465 struct rlimit rl, nrl, orl; 1466 struct rlimit *p; 1467 int which; 1468 int error; 1469 1470 /* XXX: Cannot operate any process other than its own */ 1471 if (SCARG(uap, pid) != 0) 1472 return EPERM; 1473 1474 which = linux_to_bsd_limit(SCARG(uap, which)); 1475 if (which < 0) 1476 return -which; 1477 1478 p = SCARG(uap, old_rlp); 1479 if (p != NULL) { 1480 memset(&orl, 0, sizeof(orl)); 1481 bsd_to_linux_rlimit64(&orl, &l->l_proc->p_rlimit[which]); 1482 if ((error = copyout(&orl, p, sizeof(orl))) != 0) 1483 return error; 1484 } 1485 1486 p = SCARG(uap, new_rlp); 1487 if (p != NULL) { 1488 if ((error = copyin(p, &nrl, sizeof(nrl))) != 0) 1489 return error; 1490 1491 linux_to_bsd_rlimit(&rl, &nrl); 1492 return dosetrlimit(l, l->l_proc, which, &rl); 1493 } 1494 1495 return 0; 1496 } 1497 1498 /* 1499 * This gets called for unsupported syscalls. The difference to sys_nosys() 1500 * is that process does not get SIGSYS, the call just returns with ENOSYS. 1501 * This is the way Linux does it and glibc depends on this behaviour. 
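 * (glibc routinely probes for newer syscalls at run time and falls back
 * to an older implementation when it sees ENOSYS; delivering SIGSYS
 * instead would kill such a process.)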
 */
int
linux_sys_nosys(struct lwp *l, const void *v, register_t *retval)
{
	return (ENOSYS);
}

int
linux_sys_getpriority(struct lwp *l, const struct linux_sys_getpriority_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(int) who;
	} */
	struct sys_getpriority_args bsa;
	int error;

	SCARG(&bsa, which) = SCARG(uap, which);
	SCARG(&bsa, who) = SCARG(uap, who);

	if ((error = sys_getpriority(l, &bsa, retval)))
		return error;

	*retval = NZERO - *retval;

	return 0;
}

int
linux_do_sys_utimensat(struct lwp *l, int fd, const char *path, struct timespec *tsp, int flags, register_t *retval)
{
	int follow, error;

	follow = (flags & LINUX_AT_SYMLINK_NOFOLLOW) ? NOFOLLOW : FOLLOW;

	if (path == NULL && fd != AT_FDCWD) {
		file_t *fp;

		/* fd_getvnode() will use the descriptor for us */
		if ((error = fd_getvnode(fd, &fp)) != 0)
			return error;
		error = do_sys_utimensat(l, AT_FDCWD, fp->f_data, NULL, 0,
		    tsp, UIO_SYSSPACE);
		fd_putfile(fd);
		return error;
	}

	return do_sys_utimensat(l, fd, NULL, path, follow, tsp, UIO_SYSSPACE);
}

int
linux_sys_utimensat(struct lwp *l, const struct linux_sys_utimensat_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(const char *) path;
		syscallarg(const struct linux_timespec *) times;
		syscallarg(int) flag;
	} */
	int error;
	struct linux_timespec lts[2];
	struct timespec *tsp = NULL, ts[2];

	if (SCARG(uap, times)) {
		error = copyin(SCARG(uap, times), &lts, sizeof(lts));
		if (error != 0)
			return error;
		linux_to_native_timespec(&ts[0], &lts[0]);
		linux_to_native_timespec(&ts[1], &lts[1]);
		tsp = ts;
	}

	return linux_do_sys_utimensat(l, SCARG(uap, fd), SCARG(uap, path),
	    tsp, SCARG(uap, flag), retval);
}

int
linux_sys_futex(struct lwp *l, const struct linux_sys_futex_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int *) uaddr;
		syscallarg(int) op;
		syscallarg(int) val;
		syscallarg(const struct linux_timespec *) timeout;
		syscallarg(int *) uaddr2;
		syscallarg(int) val3;
	} */
	struct linux_timespec lts;
	struct timespec ts, *tsp = NULL;
	int val2 = 0;
	int error;

	/*
	 * Linux overlays the "timeout" field and the "val2" field.
	 * "timeout" is only valid for FUTEX_WAIT and FUTEX_WAIT_BITSET
	 * on Linux.
	 */
	const int op = (SCARG(uap, op) & FUTEX_CMD_MASK);
	if ((op == FUTEX_WAIT || op == FUTEX_WAIT_BITSET) &&
	    SCARG(uap, timeout) != NULL) {
		if ((error = copyin(SCARG(uap, timeout),
			    &lts, sizeof(lts))) != 0) {
			return error;
		}
		linux_to_native_timespec(&ts, &lts);
		tsp = &ts;
	} else {
		val2 = (int)(uintptr_t)SCARG(uap, timeout);
	}

	return linux_do_futex(SCARG(uap, uaddr), SCARG(uap, op),
	    SCARG(uap, val), tsp, SCARG(uap, uaddr2), val2,
	    SCARG(uap, val3), retval);
}

int
linux_do_futex(int *uaddr, int op, int val, struct timespec *timeout,
    int *uaddr2, int val2, int val3, register_t *retval)
{
	/*
	 * Always clear FUTEX_PRIVATE_FLAG for Linux processes.
	 * NetBSD-native futexes exist in different namespace
	 * depending on FUTEX_PRIVATE_FLAG. This appears not
This appears not 1627 * to be the case in Linux, and some futex users will 1628 * mix private and non-private ops on the same futex 1629 * object. 1630 */ 1631 return do_futex(uaddr, op & ~FUTEX_PRIVATE_FLAG, 1632 val, timeout, uaddr2, val2, val3, retval); 1633 } 1634 1635 #define LINUX_EFD_SEMAPHORE 0x0001 1636 #define LINUX_EFD_CLOEXEC LINUX_O_CLOEXEC 1637 #define LINUX_EFD_NONBLOCK LINUX_O_NONBLOCK 1638 1639 static int 1640 linux_do_eventfd2(struct lwp *l, unsigned int initval, int flags, 1641 register_t *retval) 1642 { 1643 int nflags = 0; 1644 1645 if (flags & ~(LINUX_EFD_SEMAPHORE | LINUX_EFD_CLOEXEC | 1646 LINUX_EFD_NONBLOCK)) { 1647 return EINVAL; 1648 } 1649 if (flags & LINUX_EFD_SEMAPHORE) { 1650 nflags |= EFD_SEMAPHORE; 1651 } 1652 if (flags & LINUX_EFD_CLOEXEC) { 1653 nflags |= EFD_CLOEXEC; 1654 } 1655 if (flags & LINUX_EFD_NONBLOCK) { 1656 nflags |= EFD_NONBLOCK; 1657 } 1658 1659 return do_eventfd(l, initval, nflags, retval); 1660 } 1661 1662 int 1663 linux_sys_eventfd(struct lwp *l, const struct linux_sys_eventfd_args *uap, 1664 register_t *retval) 1665 { 1666 /* { 1667 syscallarg(unsigned int) initval; 1668 } */ 1669 1670 return linux_do_eventfd2(l, SCARG(uap, initval), 0, retval); 1671 } 1672 1673 int 1674 linux_sys_eventfd2(struct lwp *l, const struct linux_sys_eventfd2_args *uap, 1675 register_t *retval) 1676 { 1677 /* { 1678 syscallarg(unsigned int) initval; 1679 syscallarg(int) flags; 1680 } */ 1681 1682 return linux_do_eventfd2(l, SCARG(uap, initval), SCARG(uap, flags), 1683 retval); 1684 } 1685 1686 #ifndef __aarch64__ 1687 /* 1688 * epoll_create(2). Check size and call sys_epoll_create1. 1689 */ 1690 int 1691 linux_sys_epoll_create(struct lwp *l, 1692 const struct linux_sys_epoll_create_args *uap, register_t *retval) 1693 { 1694 /* { 1695 syscallarg(int) size; 1696 } */ 1697 struct sys_epoll_create1_args ca; 1698 1699 /* 1700 * SCARG(uap, size) is unused. Linux just tests it and then 1701 * forgets it as well. 1702 */ 1703 if (SCARG(uap, size) <= 0) 1704 return EINVAL; 1705 1706 SCARG(&ca, flags) = 0; 1707 return sys_epoll_create1(l, &ca, retval); 1708 } 1709 #endif /* !__aarch64__ */ 1710 1711 /* 1712 * epoll_create1(2). Translate the flags and call sys_epoll_create1. 1713 */ 1714 int 1715 linux_sys_epoll_create1(struct lwp *l, 1716 const struct linux_sys_epoll_create1_args *uap, register_t *retval) 1717 { 1718 /* { 1719 syscallarg(int) flags; 1720 } */ 1721 struct sys_epoll_create1_args ca; 1722 1723 if ((SCARG(uap, flags) & ~(LINUX_O_CLOEXEC)) != 0) 1724 return EINVAL; 1725 1726 SCARG(&ca, flags) = 0; 1727 if ((SCARG(uap, flags) & LINUX_O_CLOEXEC) != 0) 1728 SCARG(&ca, flags) |= O_CLOEXEC; 1729 1730 return sys_epoll_create1(l, &ca, retval); 1731 } 1732 1733 /* 1734 * epoll_ctl(2). Copyin event and translate it if necessary and then 1735 * call epoll_ctl_common(). 1736 */ 1737 int 1738 linux_sys_epoll_ctl(struct lwp *l, const struct linux_sys_epoll_ctl_args *uap, 1739 register_t *retval) 1740 { 1741 /* { 1742 syscallarg(int) epfd; 1743 syscallarg(int) op; 1744 syscallarg(int) fd; 1745 syscallarg(struct linux_epoll_event *) event; 1746 } */ 1747 struct linux_epoll_event lee; 1748 struct epoll_event ee; 1749 struct epoll_event *eep; 1750 int error; 1751 1752 if (SCARG(uap, op) != EPOLL_CTL_DEL) { 1753 error = copyin(SCARG(uap, event), &lee, sizeof(lee)); 1754 if (error != 0) 1755 return error; 1756 1757 /* 1758 * On some architectures, struct linux_epoll_event and 1759 * struct epoll_event are packed differently... but otherwise 1760 * the contents are the same. 
		 */
		ee.events = lee.events;
		ee.data = lee.data;

		eep = &ee;
	} else
		eep = NULL;

	return epoll_ctl_common(l, retval, SCARG(uap, epfd), SCARG(uap, op),
	    SCARG(uap, fd), eep);
}

#ifndef __aarch64__
/*
 * epoll_wait(2). Call linux_sys_epoll_pwait() with a NULL sigmask.
 */
int
linux_sys_epoll_wait(struct lwp *l,
    const struct linux_sys_epoll_wait_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) epfd;
		syscallarg(struct linux_epoll_event *) events;
		syscallarg(int) maxevents;
		syscallarg(int) timeout;
	} */
	struct linux_sys_epoll_pwait_args ea;

	SCARG(&ea, epfd) = SCARG(uap, epfd);
	SCARG(&ea, events) = SCARG(uap, events);
	SCARG(&ea, maxevents) = SCARG(uap, maxevents);
	SCARG(&ea, timeout) = SCARG(uap, timeout);
	SCARG(&ea, sigmask) = NULL;

	return linux_sys_epoll_pwait(l, &ea, retval);
}
#endif /* !__aarch64__ */

/*
 * Main body of epoll_pwait2(2). Translate timeout and sigmask and
 * call epoll_wait_common.
 */
static int
linux_epoll_pwait2_common(struct lwp *l, register_t *retval, int epfd,
    struct linux_epoll_event *events, int maxevents,
    struct linux_timespec *timeout, const linux_sigset_t *sigmask)
{
	struct timespec ts, *tsp;
	linux_sigset_t lss;
	sigset_t ss, *ssp;
	struct epoll_event *eep;
	struct linux_epoll_event *leep;
	int i, error;

	if (maxevents <= 0 || maxevents > EPOLL_MAX_EVENTS)
		return EINVAL;

	if (timeout != NULL) {
		linux_to_native_timespec(&ts, timeout);
		tsp = &ts;
	} else
		tsp = NULL;

	if (sigmask != NULL) {
		error = copyin(sigmask, &lss, sizeof(lss));
		if (error != 0)
			return error;

		linux_to_native_sigset(&ss, &lss);
		ssp = &ss;
	} else
		ssp = NULL;

	eep = kmem_alloc(maxevents * sizeof(*eep), KM_SLEEP);

	error = epoll_wait_common(l, retval, epfd, eep, maxevents, tsp,
	    ssp);
	if (error == 0 && *retval > 0) {
		leep = kmem_alloc((*retval) * sizeof(*leep), KM_SLEEP);

		/* Translate the events (because of packing). */
		for (i = 0; i < *retval; i++) {
			leep[i].events = eep[i].events;
			leep[i].data = eep[i].data;
		}

		error = copyout(leep, events, (*retval) * sizeof(*leep));
		kmem_free(leep, (*retval) * sizeof(*leep));
	}

	kmem_free(eep, maxevents * sizeof(*eep));
	return error;
}

/*
 * epoll_pwait(2). Translate the millisecond timeout and call
 * linux_epoll_pwait2_common().
 */
int
linux_sys_epoll_pwait(struct lwp *l,
    const struct linux_sys_epoll_pwait_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) epfd;
		syscallarg(struct linux_epoll_event *) events;
		syscallarg(int) maxevents;
		syscallarg(int) timeout;
		syscallarg(linux_sigset_t *) sigmask;
	} */
	struct linux_timespec lts, *ltsp;
	const int timeout = SCARG(uap, timeout);

	if (timeout >= 0) {
		/* Convert from milliseconds to timespec. */
		lts.tv_sec = timeout / 1000;
		lts.tv_nsec = (timeout % 1000) * 1000000;

		ltsp = &lts;
	} else
		ltsp = NULL;

	return linux_epoll_pwait2_common(l, retval, SCARG(uap, epfd),
	    SCARG(uap, events), SCARG(uap, maxevents), ltsp,
	    SCARG(uap, sigmask));
}


/*
 * epoll_pwait2(2). Copyin timeout and call linux_epoll_pwait2_common().
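 * Unlike epoll_pwait(2) above, the timeout here is a struct timespec
 * rather than a number of milliseconds, so no conversion is needed
 * beyond the copyin.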
 */
int
linux_sys_epoll_pwait2(struct lwp *l,
    const struct linux_sys_epoll_pwait2_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) epfd;
		syscallarg(struct linux_epoll_event *) events;
		syscallarg(int) maxevents;
		syscallarg(struct linux_timespec *) timeout;
		syscallarg(linux_sigset_t *) sigmask;
	} */
	struct linux_timespec lts, *ltsp;
	int error;

	if (SCARG(uap, timeout) != NULL) {
		error = copyin(SCARG(uap, timeout), &lts, sizeof(lts));
		if (error != 0)
			return error;

		ltsp = &lts;
	} else
		ltsp = NULL;

	return linux_epoll_pwait2_common(l, retval, SCARG(uap, epfd),
	    SCARG(uap, events), SCARG(uap, maxevents), ltsp,
	    SCARG(uap, sigmask));
}

#define	LINUX_MFD_CLOEXEC	0x0001U
#define	LINUX_MFD_ALLOW_SEALING	0x0002U
#define	LINUX_MFD_HUGETLB	0x0004U
#define	LINUX_MFD_NOEXEC_SEAL	0x0008U
#define	LINUX_MFD_EXEC		0x0010U
#define	LINUX_MFD_HUGE_FLAGS	(0x3f << 26)

#define	LINUX_MFD_ALL_FLAGS	(LINUX_MFD_CLOEXEC|LINUX_MFD_ALLOW_SEALING \
				|LINUX_MFD_HUGETLB|LINUX_MFD_NOEXEC_SEAL \
				|LINUX_MFD_EXEC|LINUX_MFD_HUGE_FLAGS)
#define	LINUX_MFD_KNOWN_FLAGS	(LINUX_MFD_CLOEXEC|LINUX_MFD_ALLOW_SEALING)

#define	LINUX_MFD_NAME_MAX	249

/*
 * memfd_create(2). Do some error checking and then call NetBSD's
 * version.
 */
int
linux_sys_memfd_create(struct lwp *l,
    const struct linux_sys_memfd_create_args *uap, register_t *retval)
{
	/* {
		syscallarg(const char *) name;
		syscallarg(unsigned int) flags;
	} */
	int error;
	char *pbuf;
	struct sys_memfd_create_args muap;
	const unsigned int lflags = SCARG(uap, flags);

	KASSERT(LINUX_MFD_NAME_MAX < NAME_MAX); /* sanity check */

	if (lflags & ~LINUX_MFD_ALL_FLAGS)
		return EINVAL;
	if ((lflags & LINUX_MFD_HUGE_FLAGS) != 0 &&
	    (lflags & LINUX_MFD_HUGETLB) == 0)
		return EINVAL;
	if ((lflags & LINUX_MFD_HUGETLB) && (lflags & LINUX_MFD_ALLOW_SEALING))
		return EINVAL;

	/* Linux has a stricter limit for name size */
	pbuf = PNBUF_GET();
	error = copyinstr(SCARG(uap, name), pbuf, LINUX_MFD_NAME_MAX+1, NULL);
	PNBUF_PUT(pbuf);
	pbuf = NULL;
	if (error != 0) {
		if (error == ENAMETOOLONG)
			error = EINVAL;
		return error;
	}

	if (lflags & ~LINUX_MFD_KNOWN_FLAGS) {
		DPRINTF(("linux_sys_memfd_create: ignored flags %x\n",
		    lflags & ~LINUX_MFD_KNOWN_FLAGS));
	}

	SCARG(&muap, name) = SCARG(uap, name);
	SCARG(&muap, flags) = lflags & LINUX_MFD_KNOWN_FLAGS;

	return sys_memfd_create(l, &muap, retval);
}

#define	LINUX_CLOSE_RANGE_UNSHARE	0x02U
#define	LINUX_CLOSE_RANGE_CLOEXEC	0x04U

/*
 * close_range(2).
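 * LINUX_CLOSE_RANGE_UNSHARE gives the caller a private copy of its file
 * descriptor table before anything is touched, and
 * LINUX_CLOSE_RANGE_CLOEXEC marks the descriptors close-on-exec instead
 * of closing them; e.g. close_range(3, ~0U, CLOSE_RANGE_CLOEXEC) tags
 * every descriptor above stderr.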
1986 */ 1987 int 1988 linux_sys_close_range(struct lwp *l, 1989 const struct linux_sys_close_range_args *uap, register_t *retval) 1990 { 1991 /* { 1992 syscallarg(unsigned int) first; 1993 syscallarg(unsigned int) last; 1994 syscallarg(unsigned int) flags; 1995 } */ 1996 unsigned int fd, last; 1997 file_t *fp; 1998 filedesc_t *fdp; 1999 const unsigned int flags = SCARG(uap, flags); 2000 2001 if (flags & ~(LINUX_CLOSE_RANGE_CLOEXEC|LINUX_CLOSE_RANGE_UNSHARE)) 2002 return EINVAL; 2003 if (SCARG(uap, first) > SCARG(uap, last)) 2004 return EINVAL; 2005 2006 if (flags & LINUX_CLOSE_RANGE_UNSHARE) { 2007 fdp = fd_copy(); 2008 fd_free(); 2009 l->l_proc->p_fd = fdp; 2010 l->l_fd = fdp; 2011 } 2012 2013 last = MIN(SCARG(uap, last), l->l_proc->p_fd->fd_lastfile); 2014 for (fd = SCARG(uap, first); fd <= last; fd++) { 2015 fp = fd_getfile(fd); 2016 if (fp == NULL) 2017 continue; 2018 2019 if (flags & LINUX_CLOSE_RANGE_CLOEXEC) { 2020 fd_set_exclose(l, fd, true); 2021 fd_putfile(fd); 2022 } else 2023 fd_close(fd); 2024 } 2025 2026 return 0; 2027 } 2028 2029 /* 2030 * readahead(2). Call posix_fadvise with POSIX_FADV_WILLNEED with some extra 2031 * error checking. 2032 */ 2033 int 2034 linux_sys_readahead(struct lwp *l, const struct linux_sys_readahead_args *uap, 2035 register_t *retval) 2036 { 2037 /* { 2038 syscallarg(int) fd; 2039 syscallarg(off_t) offset; 2040 syscallarg(size_t) count; 2041 } */ 2042 file_t *fp; 2043 int error = 0; 2044 const int fd = SCARG(uap, fd); 2045 2046 fp = fd_getfile(fd); 2047 if (fp == NULL) 2048 return EBADF; 2049 if ((fp->f_flag & FREAD) == 0) 2050 error = EBADF; 2051 else if (fp->f_type != DTYPE_VNODE || fp->f_vnode->v_type != VREG) 2052 error = EINVAL; 2053 fd_putfile(fd); 2054 if (error != 0) 2055 return error; 2056 2057 return do_posix_fadvise(fd, SCARG(uap, offset), SCARG(uap, count), 2058 POSIX_FADV_WILLNEED); 2059 } 2060
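
/*
 * Note that linux_sys_readahead() above is, in effect, just
 * posix_fadvise(fd, offset, count, POSIX_FADV_WILLNEED) with Linux's
 * stricter up-front checks that fd is a readable regular file; e.g. a
 * Linux readahead(fd, 0, 1 << 20) becomes a WILLNEED hint covering the
 * first megabyte of the file.
 */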