1 /* This file contains a collection of miscellaneous procedures. Some of them 2 * perform simple system calls. Some others do a little part of system calls 3 * that are mostly performed by the Memory Manager. 4 * 5 * The entry points into this file are 6 * do_fcntl: perform the FCNTL system call 7 * do_sync: perform the SYNC system call 8 * do_fsync: perform the FSYNC system call 9 * pm_setsid: perform VFS's side of setsid system call 10 * pm_reboot: sync disks and prepare for shutdown 11 * pm_fork: adjust the tables after PM has performed a FORK system call 12 * do_exec: handle files with FD_CLOEXEC on after PM has done an EXEC 13 * do_exit: a process has exited; note that in the tables 14 * do_set: set uid or gid for some process 15 * do_revive: revive a process that was waiting for something (e.g. TTY) 16 * do_svrctl: file system control 17 * do_getsysinfo: request copy of FS data structure 18 * pm_dumpcore: create a core dump 19 */ 20 21 #include "fs.h" 22 #include <fcntl.h> 23 #include <assert.h> 24 #include <unistd.h> 25 #include <string.h> 26 #include <minix/callnr.h> 27 #include <minix/safecopies.h> 28 #include <minix/endpoint.h> 29 #include <minix/com.h> 30 #include <minix/sysinfo.h> 31 #include <minix/u64.h> 32 #include <sys/ptrace.h> 33 #include <sys/svrctl.h> 34 #include <sys/resource.h> 35 #include "file.h" 36 #include <minix/vfsif.h> 37 #include "vnode.h" 38 #include "vmnt.h" 39 40 #define CORE_NAME "core" 41 #define CORE_MODE 0777 /* mode to use on core image files */ 42 43 #if ENABLE_SYSCALL_STATS 44 unsigned long calls_stats[NR_VFS_CALLS]; 45 #endif 46 47 static void free_proc(int flags); 48 49 /*===========================================================================* 50 * do_getsysinfo * 51 *===========================================================================*/ 52 int do_getsysinfo(void) 53 { 54 struct fproc *rfp; 55 struct fproc_light *rfpl; 56 vir_bytes src_addr, dst_addr; 57 size_t len, buf_size; 58 int what; 59 60 what = job_m_in.m_lsys_getsysinfo.what; 61 dst_addr = job_m_in.m_lsys_getsysinfo.where; 62 buf_size = job_m_in.m_lsys_getsysinfo.size; 63 64 /* Only su may call do_getsysinfo. This call may leak information (and is not 65 * stable enough to be part of the API/ABI). In the future, requests from 66 * non-system processes should be denied. 67 */ 68 69 if (!super_user) return(EPERM); 70 71 switch(what) { 72 case SI_PROC_TAB: 73 src_addr = (vir_bytes) fproc; 74 len = sizeof(struct fproc) * NR_PROCS; 75 break; 76 case SI_DMAP_TAB: 77 src_addr = (vir_bytes) dmap; 78 len = sizeof(struct dmap) * NR_DEVICES; 79 break; 80 case SI_PROCLIGHT_TAB: 81 /* Fill the light process table for the MIB service upon request. */ 82 rfpl = &fproc_light[0]; 83 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++, rfpl++) { 84 rfpl->fpl_tty = rfp->fp_tty; 85 rfpl->fpl_blocked_on = rfp->fp_blocked_on; 86 if (rfp->fp_blocked_on == FP_BLOCKED_ON_CDEV) 87 rfpl->fpl_task = rfp->fp_cdev.endpt; 88 else 89 rfpl->fpl_task = NONE; 90 } 91 src_addr = (vir_bytes) fproc_light; 92 len = sizeof(fproc_light); 93 break; 94 #if ENABLE_SYSCALL_STATS 95 case SI_CALL_STATS: 96 src_addr = (vir_bytes) calls_stats; 97 len = sizeof(calls_stats); 98 break; 99 #endif 100 default: 101 return(EINVAL); 102 } 103 104 if (len != buf_size) 105 return(EINVAL); 106 107 return sys_datacopy_wrapper(SELF, src_addr, who_e, dst_addr, len); 108 } 109 110 /*===========================================================================* 111 * do_fcntl * 112 *===========================================================================*/ 113 int do_fcntl(void) 114 { 115 /* Perform the fcntl(fd, cmd, ...) system call. */ 116 struct filp *f; 117 int fd, new_fd, fl, r = OK, fcntl_req, fcntl_argx; 118 vir_bytes addr; 119 tll_access_t locktype; 120 121 fd = job_m_in.m_lc_vfs_fcntl.fd; 122 fcntl_req = job_m_in.m_lc_vfs_fcntl.cmd; 123 fcntl_argx = job_m_in.m_lc_vfs_fcntl.arg_int; 124 addr = job_m_in.m_lc_vfs_fcntl.arg_ptr; 125 126 /* Is the file descriptor valid? */ 127 locktype = (fcntl_req == F_FREESP) ? VNODE_WRITE : VNODE_READ; 128 if ((f = get_filp(fd, locktype)) == NULL) 129 return(err_code); 130 131 switch (fcntl_req) { 132 case F_DUPFD: 133 case F_DUPFD_CLOEXEC: 134 /* This replaces the old dup() system call. */ 135 if (fcntl_argx < 0 || fcntl_argx >= OPEN_MAX) r = EINVAL; 136 else if ((r = get_fd(fp, fcntl_argx, 0, &new_fd, NULL)) == OK) { 137 f->filp_count++; 138 fp->fp_filp[new_fd] = f; 139 assert(!FD_ISSET(new_fd, &fp->fp_cloexec_set)); 140 if (fcntl_req == F_DUPFD_CLOEXEC) 141 FD_SET(new_fd, &fp->fp_cloexec_set); 142 r = new_fd; 143 } 144 break; 145 146 case F_GETFD: 147 /* Get close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */ 148 r = 0; 149 if (FD_ISSET(fd, &fp->fp_cloexec_set)) 150 r = FD_CLOEXEC; 151 break; 152 153 case F_SETFD: 154 /* Set close-on-exec flag (FD_CLOEXEC in POSIX Table 6-2). */ 155 if (fcntl_argx & FD_CLOEXEC) 156 FD_SET(fd, &fp->fp_cloexec_set); 157 else 158 FD_CLR(fd, &fp->fp_cloexec_set); 159 break; 160 161 case F_GETFL: 162 /* Get file status flags (O_NONBLOCK and O_APPEND). */ 163 fl = f->filp_flags & (O_NONBLOCK | O_APPEND | O_ACCMODE); 164 r = fl; 165 break; 166 167 case F_SETFL: 168 /* Set file status flags (O_NONBLOCK and O_APPEND). */ 169 fl = O_NONBLOCK | O_APPEND; 170 f->filp_flags = (f->filp_flags & ~fl) | (fcntl_argx & fl); 171 break; 172 173 case F_GETLK: 174 case F_SETLK: 175 case F_SETLKW: 176 /* Set or clear a file lock. */ 177 r = lock_op(fd, fcntl_req, addr); 178 break; 179 180 case F_FREESP: 181 { 182 /* Free a section of a file */ 183 off_t start, end, offset; 184 struct flock flock_arg; 185 186 /* Check if it's a regular file. */ 187 if (!S_ISREG(f->filp_vno->v_mode)) r = EINVAL; 188 else if (!(f->filp_mode & W_BIT)) r = EBADF; 189 else { 190 /* Copy flock data from userspace. */ 191 r = sys_datacopy_wrapper(who_e, addr, SELF, 192 (vir_bytes)&flock_arg, sizeof(flock_arg)); 193 } 194 195 if (r != OK) break; 196 197 /* Convert starting offset to signed. */ 198 offset = (off_t) flock_arg.l_start; 199 200 /* Figure out starting position base. */ 201 switch(flock_arg.l_whence) { 202 case SEEK_SET: start = 0; break; 203 case SEEK_CUR: start = f->filp_pos; break; 204 case SEEK_END: start = f->filp_vno->v_size; break; 205 default: r = EINVAL; 206 } 207 if (r != OK) break; 208 209 /* Check for overflow or underflow. */ 210 if (offset > 0 && start + offset < start) r = EINVAL; 211 else if (offset < 0 && start + offset > start) r = EINVAL; 212 else { 213 start += offset; 214 if (start < 0) r = EINVAL; 215 } 216 if (r != OK) break; 217 218 if (flock_arg.l_len != 0) { 219 if (start >= f->filp_vno->v_size) r = EINVAL; 220 else if ((end = start + flock_arg.l_len) <= start) r = EINVAL; 221 else if (end > f->filp_vno->v_size) end = f->filp_vno->v_size; 222 } else { 223 end = 0; 224 } 225 if (r != OK) break; 226 227 r = req_ftrunc(f->filp_vno->v_fs_e, f->filp_vno->v_inode_nr,start,end); 228 229 if (r == OK && flock_arg.l_len == 0) 230 f->filp_vno->v_size = start; 231 232 break; 233 } 234 case F_GETNOSIGPIPE: 235 r = !!(f->filp_flags & O_NOSIGPIPE); 236 break; 237 case F_SETNOSIGPIPE: 238 if (fcntl_argx) 239 f->filp_flags |= O_NOSIGPIPE; 240 else 241 f->filp_flags &= ~O_NOSIGPIPE; 242 break; 243 case F_FLUSH_FS_CACHE: 244 { 245 struct vnode *vn = f->filp_vno; 246 mode_t mode = f->filp_vno->v_mode; 247 if (!super_user) { 248 r = EPERM; 249 } else if (S_ISBLK(mode)) { 250 /* Block device; flush corresponding device blocks. */ 251 r = req_flush(vn->v_bfs_e, vn->v_sdev); 252 } else if (S_ISREG(mode) || S_ISDIR(mode)) { 253 /* Directory or regular file; flush hosting FS blocks. */ 254 r = req_flush(vn->v_fs_e, vn->v_dev); 255 } else { 256 /* Remaining cases.. Meaning unclear. */ 257 r = ENODEV; 258 } 259 break; 260 } 261 default: 262 r = EINVAL; 263 } 264 265 unlock_filp(f); 266 return(r); 267 } 268 269 /*===========================================================================* 270 * do_sync * 271 *===========================================================================*/ 272 int do_sync(void) 273 { 274 struct vmnt *vmp; 275 int r = OK; 276 277 for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) { 278 if ((r = lock_vmnt(vmp, VMNT_READ)) != OK) 279 break; 280 if (vmp->m_dev != NO_DEV && vmp->m_fs_e != NONE && 281 vmp->m_root_node != NULL) { 282 req_sync(vmp->m_fs_e); 283 } 284 unlock_vmnt(vmp); 285 } 286 287 return(r); 288 } 289 290 /*===========================================================================* 291 * do_fsync * 292 *===========================================================================*/ 293 int do_fsync(void) 294 { 295 /* Perform the fsync() system call. */ 296 struct filp *rfilp; 297 struct vmnt *vmp; 298 dev_t dev; 299 int fd, r = OK; 300 301 fd = job_m_in.m_lc_vfs_fsync.fd; 302 303 if ((rfilp = get_filp(fd, VNODE_READ)) == NULL) 304 return(err_code); 305 306 dev = rfilp->filp_vno->v_dev; 307 unlock_filp(rfilp); 308 309 for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) { 310 if (vmp->m_dev != dev) continue; 311 if ((r = lock_vmnt(vmp, VMNT_READ)) != OK) 312 break; 313 if (vmp->m_dev != NO_DEV && vmp->m_dev == dev && 314 vmp->m_fs_e != NONE && vmp->m_root_node != NULL) { 315 316 req_sync(vmp->m_fs_e); 317 } 318 unlock_vmnt(vmp); 319 } 320 321 return(r); 322 } 323 324 int dupvm(struct fproc *rfp, int pfd, int *vmfd, struct filp **newfilp) 325 { 326 int result, procfd; 327 struct filp *f = NULL; 328 struct fproc *vmf = fproc_addr(VM_PROC_NR); 329 330 *newfilp = NULL; 331 332 if ((f = get_filp2(rfp, pfd, VNODE_READ)) == NULL) { 333 printf("VFS dupvm: get_filp2 failed\n"); 334 return EBADF; 335 } 336 337 if(!(f->filp_vno->v_vmnt->m_fs_flags & RES_HASPEEK)) { 338 unlock_filp(f); 339 #if 0 /* Noisy diagnostic for mmap() by ld.so */ 340 printf("VFS dupvm: no peek available\n"); 341 #endif 342 return EINVAL; 343 } 344 345 assert(f->filp_vno); 346 assert(f->filp_vno->v_vmnt); 347 348 if (!S_ISREG(f->filp_vno->v_mode) && !S_ISBLK(f->filp_vno->v_mode)) { 349 printf("VFS: mmap regular/blockdev only; dev 0x%llx ino %llu has mode 0%o\n", 350 f->filp_vno->v_dev, f->filp_vno->v_inode_nr, f->filp_vno->v_mode); 351 unlock_filp(f); 352 return EINVAL; 353 } 354 355 /* get free FD in VM */ 356 if((result=get_fd(vmf, 0, 0, &procfd, NULL)) != OK) { 357 unlock_filp(f); 358 printf("VFS dupvm: getfd failed\n"); 359 return result; 360 } 361 362 *vmfd = procfd; 363 364 f->filp_count++; 365 assert(f->filp_count > 0); 366 vmf->fp_filp[procfd] = f; 367 368 *newfilp = f; 369 370 return OK; 371 } 372 373 /*===========================================================================* 374 * do_vm_call * 375 *===========================================================================*/ 376 int do_vm_call(void) 377 { 378 /* A call that VM does to VFS. 379 * We must reply with the fixed type VM_VFS_REPLY (and put our result info 380 * in the rest of the message) so VM can tell the difference between a 381 * request from VFS and a reply to this call. 382 */ 383 int req = job_m_in.VFS_VMCALL_REQ; 384 int req_fd = job_m_in.VFS_VMCALL_FD; 385 u32_t req_id = job_m_in.VFS_VMCALL_REQID; 386 endpoint_t ep = job_m_in.VFS_VMCALL_ENDPOINT; 387 u64_t offset = job_m_in.VFS_VMCALL_OFFSET; 388 u32_t length = job_m_in.VFS_VMCALL_LENGTH; 389 int result = OK; 390 int slot; 391 struct fproc *rfp; 392 #if !defined(NDEBUG) 393 struct fproc *vmf; 394 #endif /* !defined(NDEBUG) */ 395 struct filp *f = NULL; 396 int r; 397 398 if(job_m_in.m_source != VM_PROC_NR) 399 return ENOSYS; 400 401 if(isokendpt(ep, &slot) != OK) rfp = NULL; 402 else rfp = &fproc[slot]; 403 404 #if !defined(NDEBUG) 405 vmf = fproc_addr(VM_PROC_NR); 406 #endif /* !defined(NDEBUG) */ 407 assert(fp == vmf); 408 assert(rfp != vmf); 409 410 switch(req) { 411 case VMVFSREQ_FDLOOKUP: 412 { 413 int procfd; 414 415 /* Lookup fd in referenced process. */ 416 417 if(!rfp) { 418 printf("VFS: why isn't ep %d here?!\n", ep); 419 result = ESRCH; 420 goto reqdone; 421 } 422 423 if((result = dupvm(rfp, req_fd, &procfd, &f)) != OK) { 424 #if 0 /* Noisy diagnostic for mmap() by ld.so */ 425 printf("vfs: dupvm failed\n"); 426 #endif 427 goto reqdone; 428 } 429 430 if(S_ISBLK(f->filp_vno->v_mode)) { 431 assert(f->filp_vno->v_sdev != NO_DEV); 432 job_m_out.VMV_DEV = f->filp_vno->v_sdev; 433 job_m_out.VMV_INO = VMC_NO_INODE; 434 job_m_out.VMV_SIZE_PAGES = LONG_MAX; 435 } else { 436 job_m_out.VMV_DEV = f->filp_vno->v_dev; 437 job_m_out.VMV_INO = f->filp_vno->v_inode_nr; 438 job_m_out.VMV_SIZE_PAGES = 439 roundup(f->filp_vno->v_size, 440 PAGE_SIZE)/PAGE_SIZE; 441 } 442 443 job_m_out.VMV_FD = procfd; 444 445 result = OK; 446 447 break; 448 } 449 case VMVFSREQ_FDCLOSE: 450 { 451 result = close_fd(fp, req_fd); 452 if(result != OK) { 453 printf("VFS: VM fd close for fd %d, %d (%d)\n", 454 req_fd, fp->fp_endpoint, result); 455 } 456 break; 457 } 458 case VMVFSREQ_FDIO: 459 { 460 result = actual_lseek(fp, req_fd, SEEK_SET, offset, 461 NULL); 462 463 if(result == OK) { 464 result = actual_read_write_peek(fp, PEEKING, 465 req_fd, /* vir_bytes */ 0, length); 466 } 467 468 break; 469 } 470 default: 471 panic("VFS: bad request code from VM\n"); 472 break; 473 } 474 475 reqdone: 476 if(f) 477 unlock_filp(f); 478 479 /* fp is VM still. */ 480 assert(fp == vmf); 481 job_m_out.VMV_ENDPOINT = ep; 482 job_m_out.VMV_RESULT = result; 483 job_m_out.VMV_REQID = req_id; 484 485 /* Reply asynchronously as VM may not be able to receive 486 * an ipc_sendnb() message. 487 */ 488 job_m_out.m_type = VM_VFS_REPLY; 489 r = asynsend3(VM_PROC_NR, &job_m_out, 0); 490 if(r != OK) printf("VFS: couldn't asynsend3() to VM\n"); 491 492 /* VFS does not reply any further */ 493 return SUSPEND; 494 } 495 496 /*===========================================================================* 497 * pm_reboot * 498 *===========================================================================*/ 499 void pm_reboot() 500 { 501 /* Perform the VFS side of the reboot call. This call is performed from the PM 502 * process context. 503 */ 504 message m_out; 505 int i, r; 506 struct fproc *rfp, *pmfp; 507 508 pmfp = fp; 509 510 do_sync(); 511 512 /* Do exit processing for all leftover processes and servers, but don't 513 * actually exit them (if they were really gone, PM will tell us about it). 514 * Skip processes that handle parts of the file system; we first need to give 515 * them the chance to unmount (which should be possible as all normal 516 * processes have no open files anymore). 517 */ 518 /* This is the only place where we allow special modification of "fp". The 519 * reboot procedure should really be implemented as a PM message broadcasted 520 * to all processes, so that each process will be shut down cleanly by a 521 * thread operating on its behalf. Doing everything here is simpler, but it 522 * requires an exception to the strict model of having "fp" be the process 523 * that owns the current worker thread. 524 */ 525 for (i = 0; i < NR_PROCS; i++) { 526 rfp = &fproc[i]; 527 528 /* Don't just free the proc right away, but let it finish what it was 529 * doing first */ 530 if (rfp != fp) lock_proc(rfp); 531 if (rfp->fp_endpoint != NONE && find_vmnt(rfp->fp_endpoint) == NULL) { 532 worker_set_proc(rfp); /* temporarily fake process context */ 533 free_proc(0); 534 worker_set_proc(pmfp); /* restore original process context */ 535 } 536 if (rfp != fp) unlock_proc(rfp); 537 } 538 539 do_sync(); 540 unmount_all(0 /* Don't force */); 541 542 /* Try to exit all processes again including File Servers */ 543 for (i = 0; i < NR_PROCS; i++) { 544 rfp = &fproc[i]; 545 546 /* Don't just free the proc right away, but let it finish what it was 547 * doing first */ 548 if (rfp != fp) lock_proc(rfp); 549 if (rfp->fp_endpoint != NONE) { 550 worker_set_proc(rfp); /* temporarily fake process context */ 551 free_proc(0); 552 worker_set_proc(pmfp); /* restore original process context */ 553 } 554 if (rfp != fp) unlock_proc(rfp); 555 } 556 557 do_sync(); 558 unmount_all(1 /* Force */); 559 560 /* Reply to PM for synchronization */ 561 memset(&m_out, 0, sizeof(m_out)); 562 563 m_out.m_type = VFS_PM_REBOOT_REPLY; 564 565 if ((r = ipc_send(PM_PROC_NR, &m_out)) != OK) 566 panic("pm_reboot: ipc_send failed: %d", r); 567 } 568 569 /*===========================================================================* 570 * pm_fork * 571 *===========================================================================*/ 572 void pm_fork(endpoint_t pproc, endpoint_t cproc, pid_t cpid) 573 { 574 /* Perform those aspects of the fork() system call that relate to files. 575 * In particular, let the child inherit its parent's file descriptors. 576 * The parent and child parameters tell who forked off whom. The file 577 * system uses the same slot numbers as the kernel. Only PM makes this call. 578 */ 579 struct fproc *cp, *pp; 580 int i, parentno, childno; 581 mutex_t c_fp_lock; 582 583 /* Check up-to-dateness of fproc. */ 584 okendpt(pproc, &parentno); 585 586 /* PM gives child endpoint, which implies process slot information. 587 * Don't call isokendpt, because that will verify if the endpoint 588 * number is correct in fproc, which it won't be. 589 */ 590 childno = _ENDPOINT_P(cproc); 591 if (childno < 0 || childno >= NR_PROCS) 592 panic("VFS: bogus child for forking: %d", cproc); 593 if (fproc[childno].fp_pid != PID_FREE) 594 panic("VFS: forking on top of in-use child: %d", childno); 595 596 /* Copy the parent's fproc struct to the child. */ 597 /* However, the mutex variables belong to a slot and must stay the same. */ 598 c_fp_lock = fproc[childno].fp_lock; 599 fproc[childno] = fproc[parentno]; 600 fproc[childno].fp_lock = c_fp_lock; 601 602 /* Increase the counters in the 'filp' table. */ 603 cp = &fproc[childno]; 604 pp = &fproc[parentno]; 605 606 for (i = 0; i < OPEN_MAX; i++) 607 if (cp->fp_filp[i] != NULL) cp->fp_filp[i]->filp_count++; 608 609 /* Fill in new process and endpoint id. */ 610 cp->fp_pid = cpid; 611 cp->fp_endpoint = cproc; 612 613 /* A forking process cannot possibly be suspended on anything. */ 614 assert(pp->fp_blocked_on == FP_BLOCKED_ON_NONE); 615 616 /* A child is not a process leader, not being revived, etc. */ 617 cp->fp_flags = FP_NOFLAGS; 618 619 /* Record the fact that both root and working dir have another user. */ 620 if (cp->fp_rd) dup_vnode(cp->fp_rd); 621 if (cp->fp_wd) dup_vnode(cp->fp_wd); 622 } 623 624 /*===========================================================================* 625 * free_proc * 626 *===========================================================================*/ 627 static void free_proc(int flags) 628 { 629 int i; 630 register struct fproc *rfp; 631 register struct filp *rfilp; 632 register struct vnode *vp; 633 dev_t dev; 634 635 if (fp->fp_endpoint == NONE) 636 panic("free_proc: already free"); 637 638 if (fp_is_blocked(fp)) 639 unpause(); 640 641 /* Loop on file descriptors, closing any that are open. */ 642 for (i = 0; i < OPEN_MAX; i++) { 643 (void) close_fd(fp, i); 644 } 645 646 /* Release root and working directories. */ 647 if (fp->fp_rd) { put_vnode(fp->fp_rd); fp->fp_rd = NULL; } 648 if (fp->fp_wd) { put_vnode(fp->fp_wd); fp->fp_wd = NULL; } 649 650 /* The rest of these actions is only done when processes actually exit. */ 651 if (!(flags & FP_EXITING)) return; 652 653 fp->fp_flags |= FP_EXITING; 654 655 /* Check if any process is SUSPENDed on this driver. 656 * If a driver exits, unmap its entries in the dmap table. 657 * (unmapping has to be done after the first step, because the 658 * dmap table is used in the first step.) 659 */ 660 unsuspend_by_endpt(fp->fp_endpoint); 661 dmap_unmap_by_endpt(fp->fp_endpoint); 662 663 worker_stop_by_endpt(fp->fp_endpoint); /* Unblock waiting threads */ 664 vmnt_unmap_by_endpt(fp->fp_endpoint); /* Invalidate open files if this 665 * was an active FS */ 666 667 /* If a session leader exits and it has a controlling tty, then revoke 668 * access to its controlling tty from all other processes using it. 669 */ 670 if ((fp->fp_flags & FP_SESLDR) && fp->fp_tty != 0) { 671 dev = fp->fp_tty; 672 for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) { 673 if(rfp->fp_pid == PID_FREE) continue; 674 if (rfp->fp_tty == dev) rfp->fp_tty = 0; 675 676 for (i = 0; i < OPEN_MAX; i++) { 677 if ((rfilp = rfp->fp_filp[i]) == NULL) continue; 678 if (rfilp->filp_mode == FILP_CLOSED) continue; 679 vp = rfilp->filp_vno; 680 if (!S_ISCHR(vp->v_mode)) continue; 681 if (vp->v_sdev != dev) continue; 682 lock_filp(rfilp, VNODE_READ); 683 (void) cdev_close(dev); /* Ignore any errors. */ 684 /* FIXME: missing select check */ 685 rfilp->filp_mode = FILP_CLOSED; 686 unlock_filp(rfilp); 687 } 688 } 689 } 690 691 /* Exit done. Mark slot as free. */ 692 fp->fp_endpoint = NONE; 693 fp->fp_pid = PID_FREE; 694 fp->fp_flags = FP_NOFLAGS; 695 } 696 697 /*===========================================================================* 698 * pm_exit * 699 *===========================================================================*/ 700 void pm_exit(void) 701 { 702 /* Perform the file system portion of the exit(status) system call. 703 * This function is called from the context of the exiting process. 704 */ 705 706 free_proc(FP_EXITING); 707 } 708 709 /*===========================================================================* 710 * pm_setgid * 711 *===========================================================================*/ 712 void pm_setgid(proc_e, egid, rgid) 713 endpoint_t proc_e; 714 int egid; 715 int rgid; 716 { 717 register struct fproc *tfp; 718 int slot; 719 720 okendpt(proc_e, &slot); 721 tfp = &fproc[slot]; 722 723 tfp->fp_effgid = egid; 724 tfp->fp_realgid = rgid; 725 } 726 727 728 /*===========================================================================* 729 * pm_setgroups * 730 *===========================================================================*/ 731 void pm_setgroups(proc_e, ngroups, groups) 732 endpoint_t proc_e; 733 int ngroups; 734 gid_t *groups; 735 { 736 struct fproc *rfp; 737 int slot; 738 739 okendpt(proc_e, &slot); 740 rfp = &fproc[slot]; 741 if (ngroups * sizeof(gid_t) > sizeof(rfp->fp_sgroups)) 742 panic("VFS: pm_setgroups: too much data to copy"); 743 if (sys_datacopy_wrapper(who_e, (vir_bytes) groups, SELF, (vir_bytes) rfp->fp_sgroups, 744 ngroups * sizeof(gid_t)) == OK) { 745 rfp->fp_ngroups = ngroups; 746 } else 747 panic("VFS: pm_setgroups: datacopy failed"); 748 } 749 750 751 /*===========================================================================* 752 * pm_setuid * 753 *===========================================================================*/ 754 void pm_setuid(proc_e, euid, ruid) 755 endpoint_t proc_e; 756 int euid; 757 int ruid; 758 { 759 struct fproc *tfp; 760 int slot; 761 762 okendpt(proc_e, &slot); 763 tfp = &fproc[slot]; 764 765 tfp->fp_effuid = euid; 766 tfp->fp_realuid = ruid; 767 } 768 769 /*===========================================================================* 770 * pm_setsid * 771 *===========================================================================*/ 772 void pm_setsid(endpoint_t proc_e) 773 { 774 /* Perform the VFS side of the SETSID call, i.e. get rid of the controlling 775 * terminal of a process, and make the process a session leader. 776 */ 777 struct fproc *rfp; 778 int slot; 779 780 /* Make the process a session leader with no controlling tty. */ 781 okendpt(proc_e, &slot); 782 rfp = &fproc[slot]; 783 rfp->fp_flags |= FP_SESLDR; 784 rfp->fp_tty = 0; 785 } 786 787 /*===========================================================================* 788 * do_svrctl * 789 *===========================================================================*/ 790 int do_svrctl(void) 791 { 792 unsigned long svrctl; 793 vir_bytes ptr; 794 795 svrctl = job_m_in.m_lc_svrctl.request; 796 ptr = job_m_in.m_lc_svrctl.arg; 797 798 if (IOCGROUP(svrctl) != 'F') return(EINVAL); 799 800 switch (svrctl) { 801 case VFSSETPARAM: 802 case VFSGETPARAM: 803 { 804 struct sysgetenv sysgetenv; 805 char search_key[64]; 806 char val[64]; 807 int r, s; 808 809 /* Copy sysgetenv structure to VFS */ 810 if (sys_datacopy_wrapper(who_e, ptr, SELF, (vir_bytes) &sysgetenv, 811 sizeof(sysgetenv)) != OK) 812 return(EFAULT); 813 814 /* Basic sanity checking */ 815 if (svrctl == VFSSETPARAM) { 816 if (sysgetenv.keylen <= 0 || 817 sysgetenv.keylen > (sizeof(search_key) - 1) || 818 sysgetenv.vallen <= 0 || 819 sysgetenv.vallen >= sizeof(val)) { 820 return(EINVAL); 821 } 822 } 823 824 /* Copy parameter "key" */ 825 if ((s = sys_datacopy_wrapper(who_e, (vir_bytes) sysgetenv.key, 826 SELF, (vir_bytes) search_key, 827 sysgetenv.keylen)) != OK) 828 return(s); 829 search_key[sysgetenv.keylen] = '\0'; /* Limit string */ 830 831 /* Is it a parameter we know? */ 832 if (svrctl == VFSSETPARAM) { 833 if (!strcmp(search_key, "verbose")) { 834 int verbose_val; 835 if ((s = sys_datacopy_wrapper(who_e, 836 (vir_bytes) sysgetenv.val, SELF, 837 (vir_bytes) &val, sysgetenv.vallen)) != OK) 838 return(s); 839 val[sysgetenv.vallen] = '\0'; /* Limit string */ 840 verbose_val = atoi(val); 841 if (verbose_val < 0 || verbose_val > 4) { 842 return(EINVAL); 843 } 844 verbose = verbose_val; 845 r = OK; 846 } else { 847 r = ESRCH; 848 } 849 } else { /* VFSGETPARAM */ 850 char small_buf[60]; 851 852 r = ESRCH; 853 if (!strcmp(search_key, "print_traces")) { 854 mthread_stacktraces(); 855 sysgetenv.val = 0; 856 sysgetenv.vallen = 0; 857 r = OK; 858 } else if (!strcmp(search_key, "print_select")) { 859 select_dump(); 860 sysgetenv.val = 0; 861 sysgetenv.vallen = 0; 862 r = OK; 863 } else if (!strcmp(search_key, "active_threads")) { 864 int active = NR_WTHREADS - worker_available(); 865 snprintf(small_buf, sizeof(small_buf) - 1, 866 "%d", active); 867 sysgetenv.vallen = strlen(small_buf); 868 r = OK; 869 } 870 871 if (r == OK) { 872 if ((s = sys_datacopy_wrapper(SELF, 873 (vir_bytes) &sysgetenv, who_e, ptr, 874 sizeof(sysgetenv))) != OK) 875 return(s); 876 if (sysgetenv.val != 0) { 877 if ((s = sys_datacopy_wrapper(SELF, 878 (vir_bytes) small_buf, who_e, 879 (vir_bytes) sysgetenv.val, 880 sysgetenv.vallen)) != OK) 881 return(s); 882 } 883 } 884 } 885 886 return(r); 887 } 888 default: 889 return(EINVAL); 890 } 891 } 892 893 /*===========================================================================* 894 * pm_dumpcore * 895 *===========================================================================*/ 896 int pm_dumpcore(int csig, vir_bytes exe_name) 897 { 898 int r, core_fd; 899 struct filp *f; 900 char core_path[PATH_MAX]; 901 char proc_name[PROC_NAME_LEN]; 902 903 /* In effect, the coredump is generated through the use of calls as if made 904 * by the process itself. As such, the process must not be doing anything 905 * else. Therefore, if the process was blocked on anything, unblock it 906 * first. This step is the reason we cannot use this function to generate a 907 * core dump of a process while it is still running (i.e., without 908 * terminating it), as it changes the state of the process. 909 */ 910 if (fp_is_blocked(fp)) 911 unpause(); 912 913 /* open core file */ 914 snprintf(core_path, PATH_MAX, "%s.%d", CORE_NAME, fp->fp_pid); 915 r = core_fd = common_open(core_path, O_WRONLY | O_CREAT | O_TRUNC, 916 CORE_MODE, FALSE /*for_exec*/); 917 if (r < 0) goto core_exit; 918 919 /* get process name */ 920 r = sys_datacopy_wrapper(PM_PROC_NR, exe_name, VFS_PROC_NR, 921 (vir_bytes) proc_name, PROC_NAME_LEN); 922 if (r != OK) goto core_exit; 923 proc_name[PROC_NAME_LEN - 1] = '\0'; 924 925 /* write the core dump */ 926 f = get_filp(core_fd, VNODE_WRITE); 927 assert(f != NULL); 928 write_elf_core_file(f, csig, proc_name); 929 unlock_filp(f); 930 931 core_exit: 932 /* The core file descriptor will be closed as part of the process exit. */ 933 free_proc(FP_EXITING); 934 935 return(r); 936 } 937 938 /*===========================================================================* 939 * ds_event * 940 *===========================================================================*/ 941 void 942 ds_event(void) 943 { 944 char key[DS_MAX_KEYLEN]; 945 char *blkdrv_prefix = "drv.blk."; 946 char *chrdrv_prefix = "drv.chr."; 947 u32_t value; 948 int type, r, is_blk; 949 endpoint_t owner_endpoint; 950 951 /* Get the event and the owner from DS. */ 952 while ((r = ds_check(key, &type, &owner_endpoint)) == OK) { 953 /* Only check for block and character driver up events. */ 954 if (!strncmp(key, blkdrv_prefix, strlen(blkdrv_prefix))) { 955 is_blk = TRUE; 956 } else if (!strncmp(key, chrdrv_prefix, strlen(chrdrv_prefix))) { 957 is_blk = FALSE; 958 } else { 959 continue; 960 } 961 962 if ((r = ds_retrieve_u32(key, &value)) != OK) { 963 printf("VFS: ds_event: ds_retrieve_u32 failed\n"); 964 break; 965 } 966 if (value != DS_DRIVER_UP) continue; 967 968 /* Perform up. */ 969 dmap_endpt_up(owner_endpoint, is_blk); 970 } 971 972 if (r != ENOENT) printf("VFS: ds_event: ds_check failed: %d\n", r); 973 } 974 975 /* A function to be called on panic(). */ 976 void panic_hook(void) 977 { 978 printf("VFS mthread stacktraces:\n"); 979 mthread_stacktraces(); 980 } 981 982 /*===========================================================================* 983 * do_getrusage * 984 *===========================================================================*/ 985 int do_getrusage(void) 986 { 987 /* Obsolete vfs_getrusage(2) call from userland. The getrusage call is 988 * now fully handled by PM, and for any future fields that should be 989 * supplied by VFS, VFS should be queried by PM rather than by the user 990 * program directly. TODO: remove this call after the next release. 991 */ 992 return OK; 993 } 994