/* This file contains a collection of miscellaneous procedures.  Some of them
 * perform simple system calls.  Some others do a little part of system calls
 * that are mostly performed by the Memory Manager.
 *
 * The entry points into this file are
 *   do_fcntl:       perform the FCNTL system call
 *   do_sync:        perform the SYNC system call
 *   do_fsync:       perform the FSYNC system call
 *   pm_setsid:      perform VFS's side of setsid system call
 *   pm_reboot:      sync disks and prepare for shutdown
 *   pm_fork:        adjust the tables after PM has performed a FORK system call
 *   do_exec:        handle files with FD_CLOEXEC on after PM has done an EXEC
 *   do_exit:        a process has exited; note that in the tables
 *   do_set:         set uid or gid for some process
 *   do_revive:      revive a process that was waiting for something (e.g. TTY)
 *   do_svrctl:      file system control
 *   do_getsysinfo:  request copy of FS data structure
 *   pm_dumpcore:    create a core dump
 */

#include "fs.h"
#include <fcntl.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>
#include <minix/callnr.h>
#include <minix/safecopies.h>
#include <minix/endpoint.h>
#include <minix/com.h>
#include <minix/sysinfo.h>
#include <minix/u64.h>
#include <sys/ptrace.h>
#include <sys/svrctl.h>
#include <sys/resource.h>
#include "file.h"
#include <minix/vfsif.h>
#include "vnode.h"
#include "vmnt.h"

#define CORE_NAME  "core"
#define CORE_MODE  0777   /* mode to use on core image files */

#if ENABLE_SYSCALL_STATS
unsigned long calls_stats[NR_VFS_CALLS];
#endif

static void free_proc(int flags);

/*===========================================================================*
 *                              do_getsysinfo                               *
 *===========================================================================*/
int do_getsysinfo(void)
{
  struct fproc *rfp;
  struct fproc_light *rfpl;
  vir_bytes src_addr, dst_addr;
  size_t len, buf_size;
  int what;

  what = job_m_in.m_lsys_getsysinfo.what;
  dst_addr = job_m_in.m_lsys_getsysinfo.where;
  buf_size = job_m_in.m_lsys_getsysinfo.size;

  /* Only su may call do_getsysinfo.  This call may leak information (and is
   * not stable enough to be part of the API/ABI).  In the future, requests
   * from non-system processes should be denied.
   */
  if (!super_user) return(EPERM);

  switch (what) {
    case SI_PROC_TAB:
      src_addr = (vir_bytes) fproc;
      len = sizeof(struct fproc) * NR_PROCS;
      break;
    case SI_DMAP_TAB:
      src_addr = (vir_bytes) dmap;
      len = sizeof(struct dmap) * NR_DEVICES;
      break;
    case SI_PROCLIGHT_TAB:
      /* Fill the light process table for the MIB service upon request. */
      rfpl = &fproc_light[0];
      for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++, rfpl++) {
        rfpl->fpl_tty = rfp->fp_tty;
        rfpl->fpl_blocked_on = rfp->fp_blocked_on;
        if (rfp->fp_blocked_on == FP_BLOCKED_ON_CDEV)
          rfpl->fpl_task = rfp->fp_cdev.endpt;
        else
          rfpl->fpl_task = NONE;
      }
      src_addr = (vir_bytes) fproc_light;
      len = sizeof(fproc_light);
      break;
#if ENABLE_SYSCALL_STATS
    case SI_CALL_STATS:
      src_addr = (vir_bytes) calls_stats;
      len = sizeof(calls_stats);
      break;
#endif
    default:
      return(EINVAL);
  }

  if (len != buf_size)
    return(EINVAL);

  return sys_datacopy_wrapper(SELF, src_addr, who_e, dst_addr, len);
}

/*===========================================================================*
 *                              do_fcntl                                    *
 *===========================================================================*/
int do_fcntl(void)
{
/* Perform the fcntl(fd, cmd, ...) system call. */

  struct filp *f;
  int fd, new_fd, fl, r = OK, fcntl_req, fcntl_argx;
  vir_bytes addr;
  tll_access_t locktype;

  fd = job_m_in.m_lc_vfs_fcntl.fd;
  fcntl_req = job_m_in.m_lc_vfs_fcntl.cmd;
  fcntl_argx = job_m_in.m_lc_vfs_fcntl.arg_int;
  addr = job_m_in.m_lc_vfs_fcntl.arg_ptr;

  /* Is the file descriptor valid? */
  locktype = (fcntl_req == F_FREESP) ? VNODE_WRITE : VNODE_READ;
  if ((f = get_filp(fd, locktype)) == NULL)
    return(err_code);

  switch (fcntl_req) {
    case F_DUPFD:
    case F_DUPFD_CLOEXEC:
      /* This replaces the old dup() system call. */
      if (fcntl_argx < 0 || fcntl_argx >= OPEN_MAX) r = EINVAL;
      else if ((r = get_fd(fp, fcntl_argx, 0, &new_fd, NULL)) == OK) {
        f->filp_count++;
        fp->fp_filp[new_fd] = f;
        assert(!FD_ISSET(new_fd, &fp->fp_cloexec_set));
        if (fcntl_req == F_DUPFD_CLOEXEC)
          FD_SET(new_fd, &fp->fp_cloexec_set);
        r = new_fd;
      }
      break;

    case F_GETFD:
      /* Get close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
      r = 0;
      if (FD_ISSET(fd, &fp->fp_cloexec_set))
        r = FD_CLOEXEC;
      break;

    case F_SETFD:
      /* Set close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */
      if (fcntl_argx & FD_CLOEXEC)
        FD_SET(fd, &fp->fp_cloexec_set);
      else
        FD_CLR(fd, &fp->fp_cloexec_set);
      break;

    case F_GETFL:
      /* Get file status flags (O_NONBLOCK and O_APPEND). */
      fl = f->filp_flags & (O_NONBLOCK | O_APPEND | O_ACCMODE);
      r = fl;
      break;

    case F_SETFL:
      /* Set file status flags (O_NONBLOCK and O_APPEND). */
      fl = O_NONBLOCK | O_APPEND;
      f->filp_flags = (f->filp_flags & ~fl) | (fcntl_argx & fl);
      break;

    case F_GETLK:
    case F_SETLK:
    case F_SETLKW:
      /* Set or clear a file lock. */
      r = lock_op(fd, fcntl_req, addr);
      break;

    case F_FREESP:
    {
      /* Free a section of a file */
      off_t start, end, offset;
      struct flock flock_arg;

      /* Check if it's a regular file. */
      if (!S_ISREG(f->filp_vno->v_mode)) r = EINVAL;
      else if (!(f->filp_mode & W_BIT)) r = EBADF;
      else {
        /* Copy flock data from userspace. */
        r = sys_datacopy_wrapper(who_e, addr, SELF,
            (vir_bytes) &flock_arg, sizeof(flock_arg));
      }

      if (r != OK) break;

      /* Convert starting offset to signed. */
      offset = (off_t) flock_arg.l_start;

      /* Figure out starting position base. */
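      /* Illustrative arithmetic (hypothetical values, not from any real
       * caller): with l_whence == SEEK_END, l_start == -4096 and a file of
       * v_size == 16384 bytes, the base picked below is 16384 and the signed
       * offset is -4096, so start becomes 12288; with l_len == 0 the file is
       * then truncated at offset 12288.
       */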
      switch (flock_arg.l_whence) {
        case SEEK_SET: start = 0; break;
        case SEEK_CUR: start = f->filp_pos; break;
        case SEEK_END: start = f->filp_vno->v_size; break;
        default: r = EINVAL;
      }
      if (r != OK) break;

      /* Check for overflow or underflow. */
      if (offset > 0 && start + offset < start) r = EINVAL;
      else if (offset < 0 && start + offset > start) r = EINVAL;
      else {
        start += offset;
        if (start < 0) r = EINVAL;
      }
      if (r != OK) break;

      if (flock_arg.l_len != 0) {
        if (start >= f->filp_vno->v_size) r = EINVAL;
        else if ((end = start + flock_arg.l_len) <= start) r = EINVAL;
        else if (end > f->filp_vno->v_size) end = f->filp_vno->v_size;
      } else {
        end = 0;
      }
      if (r != OK) break;

      r = req_ftrunc(f->filp_vno->v_fs_e, f->filp_vno->v_inode_nr, start, end);

      if (r == OK && flock_arg.l_len == 0)
        f->filp_vno->v_size = start;

      break;
    }
    case F_GETNOSIGPIPE:
      r = !!(f->filp_flags & O_NOSIGPIPE);
      break;
    case F_SETNOSIGPIPE:
      if (fcntl_argx)
        f->filp_flags |= O_NOSIGPIPE;
      else
        f->filp_flags &= ~O_NOSIGPIPE;
      break;
    case F_FLUSH_FS_CACHE:
    {
      struct vnode *vn = f->filp_vno;
      mode_t mode = f->filp_vno->v_mode;

      if (!super_user) {
        r = EPERM;
      } else if (S_ISBLK(mode)) {
        /* Block device; flush corresponding device blocks. */
        r = req_flush(vn->v_bfs_e, vn->v_sdev);
      } else if (S_ISREG(mode) || S_ISDIR(mode)) {
        /* Directory or regular file; flush hosting FS blocks. */
        r = req_flush(vn->v_fs_e, vn->v_dev);
      } else {
        /* Remaining cases.. Meaning unclear. */
        r = ENODEV;
      }
      break;
    }
    default:
      r = EINVAL;
  }

  unlock_filp(f);
  return(r);
}

/*===========================================================================*
 *                              do_sync                                     *
 *===========================================================================*/
int do_sync(void)
{
  struct vmnt *vmp;
  int r = OK;

  for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
    if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
      break;
    if (vmp->m_dev != NO_DEV && vmp->m_fs_e != NONE &&
        vmp->m_root_node != NULL) {
      req_sync(vmp->m_fs_e);
    }
    unlock_vmnt(vmp);
  }

  return(r);
}

/*===========================================================================*
 *                              do_fsync                                    *
 *===========================================================================*/
int do_fsync(void)
{
/* Perform the fsync() system call. */
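/* Note on the approach below: VFS does not sync just the one file.  It looks
 * up the device holding the file's vnode and then asks every mounted file
 * system on that device (via req_sync) to flush all of its dirty data.
 */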
  struct filp *rfilp;
  struct vmnt *vmp;
  dev_t dev;
  int fd, r = OK;

  fd = job_m_in.m_lc_vfs_fsync.fd;

  if ((rfilp = get_filp(fd, VNODE_READ)) == NULL)
    return(err_code);

  dev = rfilp->filp_vno->v_dev;
  unlock_filp(rfilp);

  for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
    if (vmp->m_dev != dev) continue;
    if ((r = lock_vmnt(vmp, VMNT_READ)) != OK)
      break;
    if (vmp->m_dev != NO_DEV && vmp->m_dev == dev &&
        vmp->m_fs_e != NONE && vmp->m_root_node != NULL) {
      req_sync(vmp->m_fs_e);
    }
    unlock_vmnt(vmp);
  }

  return(r);
}

int dupvm(struct fproc *rfp, int pfd, int *vmfd, struct filp **newfilp)
{
  int result, procfd;
  struct filp *f = NULL;
  struct fproc *vmf = fproc_addr(VM_PROC_NR);

  *newfilp = NULL;

  if ((f = get_filp2(rfp, pfd, VNODE_READ)) == NULL) {
    printf("VFS dupvm: get_filp2 failed\n");
    return EBADF;
  }

  if (!(f->filp_vno->v_vmnt->m_fs_flags & RES_HASPEEK)) {
    unlock_filp(f);
#if 0  /* Noisy diagnostic for mmap() by ld.so */
    printf("VFS dupvm: no peek available\n");
#endif
    return EINVAL;
  }

  assert(f->filp_vno);
  assert(f->filp_vno->v_vmnt);

  if (!S_ISREG(f->filp_vno->v_mode) && !S_ISBLK(f->filp_vno->v_mode)) {
    printf("VFS: mmap regular/blockdev only; dev 0x%llx ino %llu has mode 0%o\n",
        f->filp_vno->v_dev, f->filp_vno->v_inode_nr, f->filp_vno->v_mode);
    unlock_filp(f);
    return EINVAL;
  }

  /* get free FD in VM */
  if ((result = get_fd(vmf, 0, 0, &procfd, NULL)) != OK) {
    unlock_filp(f);
    printf("VFS dupvm: getfd failed\n");
    return result;
  }

  *vmfd = procfd;

  f->filp_count++;
  assert(f->filp_count > 0);
  vmf->fp_filp[procfd] = f;

  *newfilp = f;

  return OK;
}

/*===========================================================================*
 *                              do_vm_call                                  *
 *===========================================================================*/
int do_vm_call(void)
{
/* A call that VM does to VFS.
 * We must reply with the fixed type VM_VFS_REPLY (and put our result info
 * in the rest of the message) so VM can tell the difference between a
 * request from VFS and a reply to this call.
 */
  int req = job_m_in.VFS_VMCALL_REQ;
  int req_fd = job_m_in.VFS_VMCALL_FD;
  u32_t req_id = job_m_in.VFS_VMCALL_REQID;
  endpoint_t ep = job_m_in.VFS_VMCALL_ENDPOINT;
  u64_t offset = job_m_in.VFS_VMCALL_OFFSET;
  u32_t length = job_m_in.VFS_VMCALL_LENGTH;
  int result = OK;
  int slot;
  struct fproc *rfp;
#if !defined(NDEBUG)
  struct fproc *vmf;
#endif /* !defined(NDEBUG) */
  struct filp *f = NULL;
  int r;

  if (job_m_in.m_source != VM_PROC_NR)
    return ENOSYS;

  if (isokendpt(ep, &slot) != OK) rfp = NULL;
  else rfp = &fproc[slot];

#if !defined(NDEBUG)
  vmf = fproc_addr(VM_PROC_NR);
#endif /* !defined(NDEBUG) */
  assert(fp == vmf);
  assert(rfp != vmf);

  switch (req) {
    case VMVFSREQ_FDLOOKUP:
    {
      int procfd;

      /* Lookup fd in referenced process. */
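      /* dupvm() below installs a reference to the target process's open file
       * in VM's own fd table (procfd), so that VM can later name the same
       * open file in VMVFSREQ_FDIO and VMVFSREQ_FDCLOSE requests.
       */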
      if (!rfp) {
        printf("VFS: why isn't ep %d here?!\n", ep);
        result = ESRCH;
        goto reqdone;
      }

      if ((result = dupvm(rfp, req_fd, &procfd, &f)) != OK) {
#if 0  /* Noisy diagnostic for mmap() by ld.so */
        printf("vfs: dupvm failed\n");
#endif
        goto reqdone;
      }

      if (S_ISBLK(f->filp_vno->v_mode)) {
        assert(f->filp_vno->v_sdev != NO_DEV);
        job_m_out.VMV_DEV = f->filp_vno->v_sdev;
        job_m_out.VMV_INO = VMC_NO_INODE;
        job_m_out.VMV_SIZE_PAGES = LONG_MAX;
      } else {
        job_m_out.VMV_DEV = f->filp_vno->v_dev;
        job_m_out.VMV_INO = f->filp_vno->v_inode_nr;
        job_m_out.VMV_SIZE_PAGES =
            roundup(f->filp_vno->v_size, PAGE_SIZE) / PAGE_SIZE;
      }

      job_m_out.VMV_FD = procfd;

      result = OK;

      break;
    }
    case VMVFSREQ_FDCLOSE:
    {
      result = close_fd(fp, req_fd);
      if (result != OK) {
        printf("VFS: VM fd close for fd %d, %d (%d)\n",
            req_fd, fp->fp_endpoint, result);
      }
      break;
    }
    case VMVFSREQ_FDIO:
    {
      result = actual_lseek(fp, req_fd, SEEK_SET, offset, NULL);

      if (result == OK) {
        result = actual_read_write_peek(fp, PEEKING,
            req_fd, /* vir_bytes */ 0, length);
      }

      break;
    }
    default:
      panic("VFS: bad request code from VM\n");
      break;
  }

reqdone:
  if (f)
    unlock_filp(f);

  /* fp is VM still. */
  assert(fp == vmf);
  job_m_out.VMV_ENDPOINT = ep;
  job_m_out.VMV_RESULT = result;
  job_m_out.VMV_REQID = req_id;

  /* Reply asynchronously as VM may not be able to receive
   * an ipc_sendnb() message.
   */
  job_m_out.m_type = VM_VFS_REPLY;
  r = asynsend3(VM_PROC_NR, &job_m_out, 0);
  if (r != OK) printf("VFS: couldn't asynsend3() to VM\n");

  /* VFS does not reply any further */
  return SUSPEND;
}

/*===========================================================================*
 *                              pm_reboot                                   *
 *===========================================================================*/
void
pm_reboot(void)
{
/* Perform the VFS side of the reboot call.  This call is performed from the
 * PM process context.
 */
  message m_out;
  int i, r;
  struct fproc *rfp, *pmfp;

  pmfp = fp;

  do_sync();

  /* Do exit processing for all leftover processes and servers, but don't
   * actually exit them (if they were really gone, PM will tell us about it).
   * Skip processes that handle parts of the file system; we first need to
   * give them the chance to unmount (which should be possible as all normal
   * processes have no open files anymore).
   */
  /* This is the only place where we allow special modification of "fp".  The
   * reboot procedure should really be implemented as a PM message broadcasted
   * to all processes, so that each process will be shut down cleanly by a
   * thread operating on its behalf.  Doing everything here is simpler, but it
   * requires an exception to the strict model of having "fp" be the process
   * that owns the current worker thread.
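   *
   * In outline, the code below makes three passes: exit processing for every
   * process that does not host a file system, a sync plus a regular unmount
   * of all file systems, and then exit processing for the remaining (file
   * server) processes followed by a final sync and a forced unmount of
   * whatever is left.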
   */
  for (i = 0; i < NR_PROCS; i++) {
    rfp = &fproc[i];

    /* Don't just free the proc right away, but let it finish what it was
     * doing first */
    if (rfp != fp) lock_proc(rfp);
    if (rfp->fp_endpoint != NONE && find_vmnt(rfp->fp_endpoint) == NULL) {
      worker_set_proc(rfp);    /* temporarily fake process context */
      free_proc(0);
      worker_set_proc(pmfp);   /* restore original process context */
    }
    if (rfp != fp) unlock_proc(rfp);
  }

  do_sync();
  unmount_all(0 /* Don't force */);

  /* Try to exit all processes again including File Servers */
  for (i = 0; i < NR_PROCS; i++) {
    rfp = &fproc[i];

    /* Don't just free the proc right away, but let it finish what it was
     * doing first */
    if (rfp != fp) lock_proc(rfp);
    if (rfp->fp_endpoint != NONE) {
      worker_set_proc(rfp);    /* temporarily fake process context */
      free_proc(0);
      worker_set_proc(pmfp);   /* restore original process context */
    }
    if (rfp != fp) unlock_proc(rfp);
  }

  do_sync();
  unmount_all(1 /* Force */);

  /* Reply to PM for synchronization */
  memset(&m_out, 0, sizeof(m_out));

  m_out.m_type = VFS_PM_REBOOT_REPLY;

  if ((r = ipc_send(PM_PROC_NR, &m_out)) != OK)
    panic("pm_reboot: ipc_send failed: %d", r);
}

/*===========================================================================*
 *                              pm_fork                                     *
 *===========================================================================*/
void pm_fork(endpoint_t pproc, endpoint_t cproc, pid_t cpid)
{
/* Perform those aspects of the fork() system call that relate to files.
 * In particular, let the child inherit its parent's file descriptors.
 * The parent and child parameters tell who forked off whom.  The file
 * system uses the same slot numbers as the kernel.  Only PM makes this call.
 */
  struct fproc *cp, *pp;
  int i, parentno, childno;
  mutex_t c_fp_lock;

  /* Check up-to-dateness of fproc. */
  okendpt(pproc, &parentno);

  /* PM gives child endpoint, which implies process slot information.
   * Don't call isokendpt, because that will verify if the endpoint
   * number is correct in fproc, which it won't be.
   */
  childno = _ENDPOINT_P(cproc);
  if (childno < 0 || childno >= NR_PROCS)
    panic("VFS: bogus child for forking: %d", cproc);
  if (fproc[childno].fp_pid != PID_FREE)
    panic("VFS: forking on top of in-use child: %d", childno);

  /* Copy the parent's fproc struct to the child. */
  /* However, the mutex variables belong to a slot and must stay the same. */
  c_fp_lock = fproc[childno].fp_lock;
  fproc[childno] = fproc[parentno];
  fproc[childno].fp_lock = c_fp_lock;

  /* Increase the counters in the 'filp' table. */
  cp = &fproc[childno];
  pp = &fproc[parentno];

  for (i = 0; i < OPEN_MAX; i++)
    if (cp->fp_filp[i] != NULL) cp->fp_filp[i]->filp_count++;

  /* Fill in new process and endpoint id. */
  cp->fp_pid = cpid;
  cp->fp_endpoint = cproc;

  /* A forking process cannot possibly be suspended on anything. */
  assert(pp->fp_blocked_on == FP_BLOCKED_ON_NONE);

  /* A child is not a process leader, not being revived, etc. */
  cp->fp_flags = FP_NOFLAGS;

  /* Record the fact that both root and working dir have another user. */
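  /* dup_vnode() only increases the vnode reference counts; the child shares
   * the very same root and working directory vnodes with its parent.
   */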
  if (cp->fp_rd) dup_vnode(cp->fp_rd);
  if (cp->fp_wd) dup_vnode(cp->fp_wd);
}

/*===========================================================================*
 *                              free_proc                                   *
 *===========================================================================*/
static void free_proc(int flags)
{
  int i;
  register struct fproc *rfp;
  register struct filp *rfilp;
  register struct vnode *vp;
  dev_t dev;

  if (fp->fp_endpoint == NONE)
    panic("free_proc: already free");

  if (fp_is_blocked(fp))
    unpause();

  /* Loop on file descriptors, closing any that are open. */
  for (i = 0; i < OPEN_MAX; i++) {
    (void) close_fd(fp, i);
  }

  /* Release root and working directories. */
  if (fp->fp_rd) { put_vnode(fp->fp_rd); fp->fp_rd = NULL; }
  if (fp->fp_wd) { put_vnode(fp->fp_wd); fp->fp_wd = NULL; }

  /* The rest of these actions are only done when processes actually exit. */
  if (!(flags & FP_EXITING)) return;

  fp->fp_flags |= FP_EXITING;

  /* Check if any process is SUSPENDed on this driver.
   * If a driver exits, unmap its entries in the dmap table.
   * (Unmapping has to be done after the first step, because the
   * dmap table is used in the first step.)
   */
  unsuspend_by_endpt(fp->fp_endpoint);
  dmap_unmap_by_endpt(fp->fp_endpoint);

  worker_stop_by_endpt(fp->fp_endpoint);  /* Unblock waiting threads */
  vmnt_unmap_by_endpt(fp->fp_endpoint);   /* Invalidate open files if this
                                           * was an active FS */

  /* If a session leader exits and it has a controlling tty, then revoke
   * access to its controlling tty from all other processes using it.
   */
  if ((fp->fp_flags & FP_SESLDR) && fp->fp_tty != 0) {
    dev = fp->fp_tty;
    for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
      if (rfp->fp_pid == PID_FREE) continue;
      if (rfp->fp_tty == dev) rfp->fp_tty = 0;

      for (i = 0; i < OPEN_MAX; i++) {
        if ((rfilp = rfp->fp_filp[i]) == NULL) continue;
        if (rfilp->filp_mode == FILP_CLOSED) continue;
        vp = rfilp->filp_vno;
        if (!S_ISCHR(vp->v_mode)) continue;
        if (vp->v_sdev != dev) continue;
        lock_filp(rfilp, VNODE_READ);
        (void) cdev_close(dev);  /* Ignore any errors. */
        /* FIXME: missing select check */
        rfilp->filp_mode = FILP_CLOSED;
        unlock_filp(rfilp);
      }
    }
  }

  /* Exit done.  Mark slot as free. */
  fp->fp_endpoint = NONE;
  fp->fp_pid = PID_FREE;
  fp->fp_flags = FP_NOFLAGS;
}

/*===========================================================================*
 *                              pm_exit                                     *
 *===========================================================================*/
void pm_exit(void)
{
/* Perform the file system portion of the exit(status) system call.
 * This function is called from the context of the exiting process.
 */

  free_proc(FP_EXITING);
}

/*===========================================================================*
 *                              pm_setgid                                   *
 *===========================================================================*/
void
pm_setgid(endpoint_t proc_e, int egid, int rgid)
{
  register struct fproc *tfp;
  int slot;

  okendpt(proc_e, &slot);
  tfp = &fproc[slot];

  tfp->fp_effgid = egid;
  tfp->fp_realgid = rgid;
}

/*===========================================================================*
 *                              pm_setgroups                                *
 *===========================================================================*/
void
pm_setgroups(endpoint_t proc_e, int ngroups, gid_t *groups)
{
  struct fproc *rfp;
  int slot;

  okendpt(proc_e, &slot);
  rfp = &fproc[slot];
  if (ngroups * sizeof(gid_t) > sizeof(rfp->fp_sgroups))
    panic("VFS: pm_setgroups: too much data to copy");
  if (sys_datacopy_wrapper(who_e, (vir_bytes) groups, SELF,
      (vir_bytes) rfp->fp_sgroups, ngroups * sizeof(gid_t)) == OK) {
    rfp->fp_ngroups = ngroups;
  } else
    panic("VFS: pm_setgroups: datacopy failed");
}

/*===========================================================================*
 *                              pm_setuid                                   *
 *===========================================================================*/
void
pm_setuid(endpoint_t proc_e, int euid, int ruid)
{
  struct fproc *tfp;
  int slot;

  okendpt(proc_e, &slot);
  tfp = &fproc[slot];

  tfp->fp_effuid = euid;
  tfp->fp_realuid = ruid;
}

/*===========================================================================*
 *                              pm_setsid                                   *
 *===========================================================================*/
void pm_setsid(endpoint_t proc_e)
{
/* Perform the VFS side of the SETSID call, i.e. get rid of the controlling
 * terminal of a process, and make the process a session leader.
 */
  struct fproc *rfp;
  int slot;

  /* Make the process a session leader with no controlling tty. */
  okendpt(proc_e, &slot);
  rfp = &fproc[slot];
  rfp->fp_flags |= FP_SESLDR;
  rfp->fp_tty = 0;
}

/*===========================================================================*
 *                              do_svrctl                                   *
 *===========================================================================*/
int do_svrctl(void)
{
  unsigned long svrctl;
  vir_bytes ptr;

  svrctl = job_m_in.m_lc_svrctl.request;
  ptr = job_m_in.m_lc_svrctl.arg;

  if (IOCGROUP(svrctl) != 'F') return(EINVAL);

  switch (svrctl) {
    case VFSSETPARAM:
    case VFSGETPARAM:
    {
      struct sysgetenv sysgetenv;
      char search_key[64];
      char val[64];
      int r, s;

      /* Copy sysgetenv structure to VFS */
      if (sys_datacopy_wrapper(who_e, ptr, SELF, (vir_bytes) &sysgetenv,
          sizeof(sysgetenv)) != OK)
        return(EFAULT);

      /* Basic sanity checking */
      if (svrctl == VFSSETPARAM) {
        if (sysgetenv.keylen <= 0 ||
            sysgetenv.keylen > (sizeof(search_key) - 1) ||
            sysgetenv.vallen <= 0 ||
            sysgetenv.vallen >= sizeof(val)) {
          return(EINVAL);
        }
      }

      /* Copy parameter "key" */
      if ((s = sys_datacopy_wrapper(who_e, (vir_bytes) sysgetenv.key,
          SELF, (vir_bytes) search_key,
          sysgetenv.keylen)) != OK)
        return(s);
      search_key[sysgetenv.keylen] = '\0';  /* Limit string */

      /* Is it a parameter we know? */
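      /* Keys recognized below: for VFSSETPARAM only "verbose", whose value
       * string must be "0" through "4" and sets the global verbose level;
       * for VFSGETPARAM "print_traces", "print_select" and "active_threads".
       */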
      if (svrctl == VFSSETPARAM) {
        if (!strcmp(search_key, "verbose")) {
          int verbose_val;
          if ((s = sys_datacopy_wrapper(who_e,
              (vir_bytes) sysgetenv.val, SELF,
              (vir_bytes) &val, sysgetenv.vallen)) != OK)
            return(s);
          val[sysgetenv.vallen] = '\0';  /* Limit string */
          verbose_val = atoi(val);
          if (verbose_val < 0 || verbose_val > 4) {
            return(EINVAL);
          }
          verbose = verbose_val;
          r = OK;
        } else {
          r = ESRCH;
        }
      } else {  /* VFSGETPARAM */
        char small_buf[60];

        r = ESRCH;
        if (!strcmp(search_key, "print_traces")) {
          mthread_stacktraces();
          sysgetenv.val = 0;
          sysgetenv.vallen = 0;
          r = OK;
        } else if (!strcmp(search_key, "print_select")) {
          select_dump();
          sysgetenv.val = 0;
          sysgetenv.vallen = 0;
          r = OK;
        } else if (!strcmp(search_key, "active_threads")) {
          int active = NR_WTHREADS - worker_available();
          snprintf(small_buf, sizeof(small_buf) - 1, "%d", active);
          sysgetenv.vallen = strlen(small_buf);
          r = OK;
        }

        if (r == OK) {
          if ((s = sys_datacopy_wrapper(SELF,
              (vir_bytes) &sysgetenv, who_e, ptr,
              sizeof(sysgetenv))) != OK)
            return(s);
          if (sysgetenv.val != 0) {
            if ((s = sys_datacopy_wrapper(SELF,
                (vir_bytes) small_buf, who_e,
                (vir_bytes) sysgetenv.val,
                sysgetenv.vallen)) != OK)
              return(s);
          }
        }
      }

      return(r);
    }
    default:
      return(EINVAL);
  }
}

/*===========================================================================*
 *                              pm_dumpcore                                 *
 *===========================================================================*/
int pm_dumpcore(int csig, vir_bytes exe_name)
{
  int r, core_fd;
  struct filp *f;
  char core_path[PATH_MAX];
  char proc_name[PROC_NAME_LEN];

  /* In effect, the coredump is generated through the use of calls as if made
   * by the process itself.  As such, the process must not be doing anything
   * else.  Therefore, if the process was blocked on anything, unblock it
   * first.  This step is the reason we cannot use this function to generate a
   * core dump of a process while it is still running (i.e., without
   * terminating it), as it changes the state of the process.
   */
  if (fp_is_blocked(fp))
    unpause();

  /* open core file */
  snprintf(core_path, PATH_MAX, "%s.%d", CORE_NAME, fp->fp_pid);
  r = core_fd = common_open(core_path, O_WRONLY | O_CREAT | O_TRUNC,
      CORE_MODE, FALSE /*for_exec*/);
  if (r < 0) goto core_exit;

  /* get process name */
  r = sys_datacopy_wrapper(PM_PROC_NR, exe_name, VFS_PROC_NR,
      (vir_bytes) proc_name, PROC_NAME_LEN);
  if (r != OK) goto core_exit;
  proc_name[PROC_NAME_LEN - 1] = '\0';

  /* write the core dump */
  f = get_filp(core_fd, VNODE_WRITE);
  assert(f != NULL);
  write_elf_core_file(f, csig, proc_name);
  unlock_filp(f);

core_exit:
  /* The core file descriptor will be closed as part of the process exit. */
  free_proc(FP_EXITING);

  return(r);
}

/*===========================================================================*
 *                              ds_event                                    *
 *===========================================================================*/
void
ds_event(void)
{
  char key[DS_MAX_KEYLEN];
  char *blkdrv_prefix = "drv.blk.";
  char *chrdrv_prefix = "drv.chr.";
  u32_t value;
  int type, r, is_blk;
  endpoint_t owner_endpoint;

  /* Get the event and the owner from DS. */
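  /* ds_check() is called repeatedly to drain all pending DS notifications;
   * it returns ENOENT once no events are left.  Only DS_DRIVER_UP
   * publications under the "drv.blk." and "drv.chr." key prefixes are acted
   * upon, by marking the publishing driver endpoint as up via
   * dmap_endpt_up().
   */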
  while ((r = ds_check(key, &type, &owner_endpoint)) == OK) {
    /* Only check for block and character driver up events. */
    if (!strncmp(key, blkdrv_prefix, strlen(blkdrv_prefix))) {
      is_blk = TRUE;
    } else if (!strncmp(key, chrdrv_prefix, strlen(chrdrv_prefix))) {
      is_blk = FALSE;
    } else {
      continue;
    }

    if ((r = ds_retrieve_u32(key, &value)) != OK) {
      printf("VFS: ds_event: ds_retrieve_u32 failed\n");
      break;
    }
    if (value != DS_DRIVER_UP) continue;

    /* Perform up. */
    dmap_endpt_up(owner_endpoint, is_blk);
  }

  if (r != ENOENT) printf("VFS: ds_event: ds_check failed: %d\n", r);
}

/* A function to be called on panic(). */
void panic_hook(void)
{
  printf("VFS mthread stacktraces:\n");
  mthread_stacktraces();
}

/*===========================================================================*
 *                              do_getrusage                                *
 *===========================================================================*/
int do_getrusage(void)
{
/* Obsolete vfs_getrusage(2) call from userland.  The getrusage call is
 * now fully handled by PM, and for any future fields that should be
 * supplied by VFS, VFS should be queried by PM rather than by the user
 * program directly.  TODO: remove this call after the next release.
 */
  return OK;
}