/*
 * a loop that gets messages requesting work, carries out the work, and sends
 * replies.
 *
 * The entry points into this file are:
 *   main:  main program of the Virtual File System
 *   reply: send a reply to a process after the requested work is done
 *
 */

#include "fs.h"
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/ioc_memory.h>
#include <sys/svrctl.h>
#include <sys/select.h>
#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/const.h>
#include <minix/endpoint.h>
#include <minix/safecopies.h>
#include <minix/debug.h>
#include <minix/vfsif.h>
#include "file.h"
#include "scratchpad.h"
#include "vmnt.h"
#include "vnode.h"

#if ENABLE_SYSCALL_STATS
EXTERN unsigned long calls_stats[NR_VFS_CALLS];
#endif

/* Thread related prototypes */
static void do_reply(struct worker_thread *wp);
static void do_work(void);
static void do_init_root(void);
static void handle_work(void (*func)(void));
static void reply(message *m_out, endpoint_t whom, int result);

static void get_work(void);
static void service_pm(void);
static int unblock(struct fproc *rfp);

/* SEF functions and variables. */
static void sef_local_startup(void);
static int sef_cb_init_fresh(int type, sef_init_info_t *info);
static endpoint_t receive_from;

/*===========================================================================*
 *                              main                                         *
 *===========================================================================*/
int main(void)
{
/* This is the main program of the file system. The main loop consists of
 * three major activities: getting new work, processing the work, and sending
 * the reply. This loop never terminates as long as the file system runs.
 */
    int transid;
    struct worker_thread *wp;

    /* SEF local startup. */
    sef_local_startup();

    printf("Started VFS: %d worker thread(s)\n", NR_WTHREADS);

    if (OK != sys_getkinfo(&kinfo))
        panic("couldn't get kernel kinfo");

    /* This is the main loop that gets work, processes it, and sends replies. */
    while (TRUE) {
        yield_all();    /* let other threads run */
        self = NULL;
        send_work();
        get_work();

        transid = TRNS_GET_ID(m_in.m_type);
        if (IS_VFS_FS_TRANSID(transid)) {
            wp = worker_get((thread_t) transid - VFS_TRANSID);
            if (wp == NULL || wp->w_fp == NULL) {
                printf("VFS: spurious message %d from endpoint %d\n",
                    m_in.m_type, m_in.m_source);
                continue;
            }
            m_in.m_type = TRNS_DEL_ID(m_in.m_type);
            do_reply(wp);
            continue;
        } else if (who_e == PM_PROC_NR) { /* Calls from PM */
            /* Special control messages from PM */
            service_pm();
            continue;
        } else if (is_notify(call_nr)) {
            /* A task ipc_notify()ed us */
            switch (who_e) {
            case DS_PROC_NR:
                /* Start a thread to handle DS events, if no thread is
                 * pending or active for it already. DS is not supposed to
                 * issue calls to VFS or be the subject of postponed PM
                 * requests, so this should be no problem.
                 */
                if (worker_can_start(fp))
                    handle_work(ds_event);
                break;
            case KERNEL:
                mthread_stacktraces();
                break;
            case CLOCK:
                /* Timer expired. Used only for select(). Check it. */
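                /* The assumed flow (not spelled out in the source): select()
                 * arms a watchdog timer elsewhere in VFS, and this CLOCK
                 * notification only signals that some deadline may have
                 * passed; the call below runs the callback of every timer
                 * that expired at or before the given timestamp.
                 */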
                expire_timers(m_in.m_notify.timestamp);
                break;
            default:
                printf("VFS: ignoring notification from %d\n", who_e);
            }
            continue;
        } else if (who_p < 0) { /* i.e., message comes from a task */
            /* We're going to ignore this message. Tasks should send
             * ipc_notify()s only.
             */
            printf("VFS: ignoring message from %d (%d)\n", who_e, call_nr);
            continue;
        }

        if (IS_BDEV_RS(call_nr)) {
            /* We've got results for a block device request. */
            bdev_reply();
        } else if (IS_CDEV_RS(call_nr)) {
            /* We've got results for a character device request. */
            cdev_reply();
        } else {
            /* Normal syscall. This spawns a new thread. */
            handle_work(do_work);
        }
    }
    return(OK); /* shouldn't come here */
}

/*===========================================================================*
 *                              handle_work                                  *
 *===========================================================================*/
static void handle_work(void (*func)(void))
{
/* Handle asynchronous device replies and new system calls. If the originating
 * endpoint is an FS endpoint, take extra care not to get into a deadlock.
 */
    struct vmnt *vmp = NULL;
    endpoint_t proc_e;
    int use_spare = FALSE;

    proc_e = m_in.m_source;

    if (fp->fp_flags & FP_SRV_PROC) {
        vmp = find_vmnt(proc_e);
        if (vmp != NULL) {
            /* A callback from an FS endpoint. Can do only one at once. */
            if (vmp->m_flags & VMNT_CALLBACK) {
                replycode(proc_e, EAGAIN);
                return;
            }
            /* Already trying to resolve a deadlock? Can't handle more. */
            if (worker_available() == 0) {
                replycode(proc_e, EAGAIN);
                return;
            }
            /* A thread is available. Set callback flag. */
            vmp->m_flags |= VMNT_CALLBACK;
            if (vmp->m_flags & VMNT_MOUNTING) {
                vmp->m_flags |= VMNT_FORCEROOTBSF;
            }
        }

        /* Use the spare thread to handle this request if needed. */
        use_spare = TRUE;
    }

    worker_start(fp, func, &m_in, use_spare);
}

/*===========================================================================*
 *                              do_reply                                     *
 *===========================================================================*/
static void do_reply(struct worker_thread *wp)
{
    struct vmnt *vmp = NULL;

    if (who_e != VM_PROC_NR && (vmp = find_vmnt(who_e)) == NULL)
        panic("Couldn't find vmnt for endpoint %d", who_e);

    if (wp->w_task != who_e) {
        printf("VFS: tid %d: expected %d to reply, not %d\n",
            wp->w_tid, wp->w_task, who_e);
    }
    *wp->w_sendrec = m_in;
    wp->w_task = NONE;
    if (vmp) vmp->m_comm.c_cur_reqs--; /* We've got our reply, make room for others */
    worker_signal(wp); /* Continue this thread */
}

/*===========================================================================*
 *                              do_pending_pipe                              *
 *===========================================================================*/
static void do_pending_pipe(void)
{
    int r, op;
    struct filp *f;
    tll_access_t locktype;

    f = scratch(fp).file.filp;
    assert(f != NULL);
    scratch(fp).file.filp = NULL;

    locktype = (job_call_nr == VFS_READ) ? VNODE_READ : VNODE_WRITE;
    op = (job_call_nr == VFS_READ) ? READING : WRITING;
    lock_filp(f, locktype);

    r = rw_pipe(op, who_e, f, scratch(fp).io.io_buffer, scratch(fp).io.io_nbytes);

    if (r != SUSPEND) /* Do we have results to report? */
        replycode(fp->fp_endpoint, r);
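    /* Reply first, then unlock: presumably this ordering keeps another
     * worker from observing the filp unlocked before the result of this
     * pipe I/O has been reported back to the blocked process.
     */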
    unlock_filp(f);
}

/*===========================================================================*
 *                              do_work                                      *
 *===========================================================================*/
static void do_work(void)
{
    unsigned int call_index;
    int error;

    if (fp->fp_pid == PID_FREE) {
        /* Process vanished before we were able to handle request.
         * Replying has no use. Just drop it.
         */
        return;
    }

    memset(&job_m_out, 0, sizeof(job_m_out));

    /* At this point we assume that we're dealing with a call that has been
     * made specifically to VFS. Typically it will be a POSIX call from a
     * normal process, but we also handle a few calls made by drivers such
     * as UDS and VND through here. Call the internal function that does
     * the work.
     */
    if (IS_VFS_CALL(job_call_nr)) {
        call_index = (unsigned int) (job_call_nr - VFS_BASE);

        if (call_index < NR_VFS_CALLS && call_vec[call_index] != NULL) {
#if ENABLE_SYSCALL_STATS
            calls_stats[call_index]++;
#endif
            error = (*call_vec[call_index])();
        } else
            error = ENOSYS;
    } else
        error = ENOSYS;

    /* Copy the results back to the user and send reply. */
    if (error != SUSPEND) reply(&job_m_out, fp->fp_endpoint, error);
}

/*===========================================================================*
 *                              sef_local_startup                            *
 *===========================================================================*/
static void sef_local_startup(void)
{
    /* Register init callbacks. */
    sef_setcb_init_fresh(sef_cb_init_fresh);
    sef_setcb_init_restart(sef_cb_init_fail);

    /* No live update support for now. */

    /* Let SEF perform startup. */
    sef_startup();
}

/*===========================================================================*
 *                              sef_cb_init_fresh                            *
 *===========================================================================*/
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *info)
{
/* Initialize the virtual file server. */
    int s, i;
    struct fproc *rfp;
    message mess;
    struct rprocpub rprocpub[NR_BOOT_PROCS];

    receive_from = NONE;
    self = NULL;
    verbose = 0;

    /* Initialize proc endpoints to NONE */
    for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
        rfp->fp_endpoint = NONE;
        rfp->fp_pid = PID_FREE;
    }

    /* Initialize the process table with the help of the process manager's
     * messages. Expect one message for each system process with its slot
     * number and pid. When no more processes follow, the magic process
     * number NONE is sent. Then, stop and synchronize with the PM.
     */
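    /* The exchange below, roughly (field names as used in the loop that
     * follows; the layout of each message is an inference from that code):
     *
     *   PM -> VFS: VFS_PM_INIT { VFS_PM_SLOT, VFS_PM_PID, VFS_PM_ENDPT }
     *     ...one message per system process...
     *   PM -> VFS: VFS_PM_INIT { VFS_PM_ENDPT == NONE }
     *   VFS -> PM: m_type = OK   (synchronization reply)
     */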
    do {
        if ((s = sef_receive(PM_PROC_NR, &mess)) != OK)
            panic("VFS: couldn't receive from PM: %d", s);

        if (mess.m_type != VFS_PM_INIT)
            panic("unexpected message from PM: %d", mess.m_type);

        if (NONE == mess.VFS_PM_ENDPT) break;

        rfp = &fproc[mess.VFS_PM_SLOT];
        rfp->fp_flags = FP_NOFLAGS;
        rfp->fp_pid = mess.VFS_PM_PID;
        rfp->fp_endpoint = mess.VFS_PM_ENDPT;
        rfp->fp_grant = GRANT_INVALID;
        rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
        rfp->fp_realuid = (uid_t) SYS_UID;
        rfp->fp_effuid = (uid_t) SYS_UID;
        rfp->fp_realgid = (gid_t) SYS_GID;
        rfp->fp_effgid = (gid_t) SYS_GID;
        rfp->fp_umask = ~0;
    } while (TRUE); /* continue until process NONE */
    mess.m_type = OK; /* tell PM that we succeeded */
    s = ipc_send(PM_PROC_NR, &mess); /* send synchronization message */

    system_hz = sys_hz();

    /* Subscribe to block and character driver events. */
    s = ds_subscribe("drv\\.[bc]..\\..*", DSF_INITIAL | DSF_OVERWRITE);
    if (s != OK) panic("VFS: can't subscribe to driver events (%d)", s);

    /* Initialize worker threads */
    worker_init();

    /* Initialize global locks */
    if (mthread_mutex_init(&bsf_lock, NULL) != 0)
        panic("VFS: couldn't initialize block special file lock");

    init_dmap(); /* Initialize device table. */

    /* Map all the services in the boot image. */
    if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
            (vir_bytes) rprocpub, sizeof(rprocpub))) != OK) {
        panic("sys_safecopyfrom failed: %d", s);
    }
    for (i = 0; i < NR_BOOT_PROCS; i++) {
        if (rprocpub[i].in_use) {
            if ((s = map_service(&rprocpub[i])) != OK) {
                panic("VFS: unable to map service: %d", s);
            }
        }
    }

    /* Initialize locks and initial values for all processes. */
    for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
        if (mutex_init(&rfp->fp_lock, NULL) != 0)
            panic("unable to initialize fproc lock");
        rfp->fp_worker = NULL;
#if LOCK_DEBUG
        rfp->fp_vp_rdlocks = 0;
        rfp->fp_vmnt_rdlocks = 0;
#endif

        /* Initialize process directories. mount_fs will set them to the
         * correct values.
         */
        for (i = 0; i < OPEN_MAX; i++)
            rfp->fp_filp[i] = NULL;
        rfp->fp_rd = NULL;
        rfp->fp_wd = NULL;
    }

    init_vnodes();  /* init vnodes */
    init_vmnts();   /* init vmnt structures */
    init_select();  /* init select() structures */
    init_filps();   /* Init filp structures */

    /* Mount PFS and initial file system root. */
    worker_start(fproc_addr(VFS_PROC_NR), do_init_root, &mess /*unused*/,
        FALSE /*use_spare*/);

    return(OK);
}

/*===========================================================================*
 *                              do_init_root                                 *
 *===========================================================================*/
static void do_init_root(void)
{
    char *mount_type, *mount_label;
    int r;

    /* Mount the pipe file server. */
    receive_from = PFS_PROC_NR;

    mount_pfs();

    /* Mount the root file system. */
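    /* Narrowing receive_from is what serializes these mounts: until the
     * root mount completes, get_work() will (presumably by design) accept
     * messages from the root FS endpoint only, so no other caller can get
     * in first.
     */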
    receive_from = MFS_PROC_NR;

    mount_type = "mfs";       /* FIXME: use boot image process name instead */
    mount_label = "fs_imgrd"; /* FIXME: obtain this from RS */

    r = mount_fs(DEV_IMGRD, "bootramdisk", "/", MFS_PROC_NR, 0, mount_type,
        mount_label);
    if (r != OK)
        panic("Failed to initialize root");
    receive_from = ANY;
}

/*===========================================================================*
 *                              lock_proc                                    *
 *===========================================================================*/
void lock_proc(struct fproc *rfp)
{
    int r;
    struct worker_thread *org_self;

    r = mutex_trylock(&rfp->fp_lock);
    if (r == 0) return;

    org_self = worker_suspend();

    if ((r = mutex_lock(&rfp->fp_lock)) != 0)
        panic("unable to lock fproc lock: %d", r);

    worker_resume(org_self);
}

/*===========================================================================*
 *                              unlock_proc                                  *
 *===========================================================================*/
void unlock_proc(struct fproc *rfp)
{
    int r;

    if ((r = mutex_unlock(&rfp->fp_lock)) != 0)
        panic("Failed to unlock: %d", r);
}

/*===========================================================================*
 *                              thread_cleanup                               *
 *===========================================================================*/
void thread_cleanup(void)
{
/* Perform cleanup actions for a worker thread. */

#if LOCK_DEBUG
    check_filp_locks_by_me();
    check_vnode_locks_by_me(fp);
    check_vmnt_locks_by_me(fp);
#endif

    if (fp->fp_flags & FP_SRV_PROC) {
        struct vmnt *vmp;

        if ((vmp = find_vmnt(fp->fp_endpoint)) != NULL) {
            vmp->m_flags &= ~VMNT_CALLBACK;
        }
    }
}

/*===========================================================================*
 *                              get_work                                     *
 *===========================================================================*/
static void get_work(void)
{
/* Normally wait for new input. However, if 'reviving' is nonzero, a
 * suspended process must be awakened.
 */
    int r, found_one, proc_p;
    register struct fproc *rp;

    while (reviving != 0) {
        found_one = FALSE;

        /* Find a suspended process. */
        for (rp = &fproc[0]; rp < &fproc[NR_PROCS]; rp++)
            if (rp->fp_pid != PID_FREE && (rp->fp_flags & FP_REVIVED)) {
                found_one = TRUE; /* Found a suspended process */
                if (unblock(rp))
                    return; /* So main loop can process job */
                send_work();
            }

        if (!found_one) /* Consistency error */
            panic("VFS: get_work couldn't revive anyone");
    }

    for (;;) {
        assert(receive_from != NONE);

        /* Normal case. No one to revive. Get a useful request. */
        if ((r = sef_receive(receive_from, &m_in)) != OK) {
            panic("VFS: sef_receive error: %d", r);
        }

        proc_p = _ENDPOINT_P(m_in.m_source);
        if (proc_p < 0 || proc_p >= NR_PROCS) fp = NULL;
        else fp = &fproc[proc_p];

        if (m_in.m_type == EDEADSRCDST) {
            printf("VFS: failed ipc_sendrec\n");
            return; /* Failed 'ipc_sendrec' */
        }

        /* Negative who_p is never used to access the fproc array. Negative
         * numbers (kernel tasks) are treated in a special way.
         */
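        /* Illustration, assuming the usual MINIX endpoint encoding: an
         * endpoint combines a generation number with a process slot, and
         *
         *     proc_p = _ENDPOINT_P(m_in.m_source);
         *
         * strips the generation bits. Kernel tasks come out as negative
         * slot numbers, which is why fp was left NULL for them above.
         */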
        if (fp && fp->fp_endpoint == NONE) {
            printf("VFS: ignoring request from %d: NONE endpoint %d (%d)\n",
                m_in.m_source, who_p, m_in.m_type);
            continue;
        }

        /* Internal consistency check; our mental image of process numbers
         * and endpoints must match with how the rest of the system thinks
         * of them.
         */
        if (fp && fp->fp_endpoint != who_e) {
            if (fproc[who_p].fp_endpoint == NONE)
                printf("slot unknown even\n");

            panic("VFS: receive endpoint inconsistent (source %d, who_p "
                "%d, stored ep %d, who_e %d).\n", m_in.m_source, who_p,
                fproc[who_p].fp_endpoint, who_e);
        }

        return;
    }
}

/*===========================================================================*
 *                              reply                                        *
 *===========================================================================*/
static void reply(message *m_out, endpoint_t whom, int result)
{
/* Send a reply to a user process. If the send fails, just ignore it. */
    int r;

    m_out->m_type = result;
    r = ipc_sendnb(whom, m_out);
    if (r != OK) {
        printf("VFS: %d couldn't send reply %d to %d: %d\n", mthread_self(),
            result, whom, r);
        util_stacktrace();
    }
}

/*===========================================================================*
 *                              replycode                                    *
 *===========================================================================*/
void replycode(endpoint_t whom, int result)
{
/* Send a reply to a user process. If the send fails, just ignore it. */
    message m_out;

    memset(&m_out, 0, sizeof(m_out));

    reply(&m_out, whom, result);
}

/*===========================================================================*
 *                              service_pm_postponed                         *
 *===========================================================================*/
void service_pm_postponed(void)
{
    int r, term_signal;
    vir_bytes core_path;
    vir_bytes exec_path, stack_frame, pc, newsp, ps_str;
    size_t exec_path_len, stack_frame_len;
    endpoint_t proc_e;
    message m_out;

    memset(&m_out, 0, sizeof(m_out));

    switch (job_call_nr) {
    case VFS_PM_EXEC:
        proc_e = job_m_in.VFS_PM_ENDPT;
        exec_path = (vir_bytes) job_m_in.VFS_PM_PATH;
        exec_path_len = (size_t) job_m_in.VFS_PM_PATH_LEN;
        stack_frame = (vir_bytes) job_m_in.VFS_PM_FRAME;
        stack_frame_len = (size_t) job_m_in.VFS_PM_FRAME_LEN;
        ps_str = (vir_bytes) job_m_in.VFS_PM_PS_STR;

        assert(proc_e == fp->fp_endpoint);

        r = pm_exec(exec_path, exec_path_len, stack_frame, stack_frame_len,
            &pc, &newsp, &ps_str);

        /* Reply status to PM */
        m_out.m_type = VFS_PM_EXEC_REPLY;
        m_out.VFS_PM_ENDPT = proc_e;
        m_out.VFS_PM_PC = (void *) pc;
        m_out.VFS_PM_STATUS = r;
        m_out.VFS_PM_NEWSP = (void *) newsp;
        m_out.VFS_PM_NEWPS_STR = ps_str;

        break;

    case VFS_PM_EXIT:
        proc_e = job_m_in.VFS_PM_ENDPT;

        assert(proc_e == fp->fp_endpoint);

        pm_exit();

        /* Reply dummy status to PM for synchronization */
        m_out.m_type = VFS_PM_EXIT_REPLY;
        m_out.VFS_PM_ENDPT = proc_e;

        break;

    case VFS_PM_DUMPCORE:
        proc_e = job_m_in.VFS_PM_ENDPT;
        term_signal = job_m_in.VFS_PM_TERM_SIG;
        core_path = (vir_bytes) job_m_in.VFS_PM_PATH;

        assert(proc_e == fp->fp_endpoint);

        r = pm_dumpcore(term_signal, core_path);

        /* Reply status to PM */
        m_out.m_type = VFS_PM_CORE_REPLY;
        m_out.VFS_PM_ENDPT = proc_e;
        m_out.VFS_PM_STATUS = r;

        break;

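    /* VFS_PM_UNPAUSE arrives when PM needs to interrupt a blocked system
     * call, e.g. to deliver a signal; unpause() presumably cancels whatever
     * this process was blocked on so that the call can be aborted.
     */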
    case VFS_PM_UNPAUSE:
        proc_e = job_m_in.VFS_PM_ENDPT;

        assert(proc_e == fp->fp_endpoint);

        unpause();

        m_out.m_type = VFS_PM_UNPAUSE_REPLY;
        m_out.VFS_PM_ENDPT = proc_e;

        break;

    default:
        panic("Unhandled postponed PM call %d", job_m_in.m_type);
    }

    r = ipc_send(PM_PROC_NR, &m_out);
    if (r != OK)
        panic("service_pm_postponed: ipc_send failed: %d", r);
}

/*===========================================================================*
 *                              service_pm                                   *
 *===========================================================================*/
static void service_pm(void)
{
/* Process a request from PM. This function is called from the main thread,
 * and may therefore not block. Any requests that may require blocking the
 * calling thread must be executed in a separate thread. Aside from
 * VFS_PM_REBOOT, all requests from PM involve another, target process: for
 * example, PM tells VFS that a process is performing a setuid() call. For
 * some requests, however, that other process may not be idle, and in that
 * case VFS must serialize the PM request handling with any operation it is
 * handling for that target process. As it happens, the requests that may
 * require blocking are also the ones where the target process may not be
 * idle. For both these reasons, such requests are run in worker threads
 * associated with the target process.
 */
    struct fproc *rfp;
    int r, slot;
    message m_out;

    memset(&m_out, 0, sizeof(m_out));

    switch (call_nr) {
    case VFS_PM_SETUID:
    {
        endpoint_t proc_e;
        uid_t euid, ruid;

        proc_e = m_in.VFS_PM_ENDPT;
        euid = m_in.VFS_PM_EID;
        ruid = m_in.VFS_PM_RID;

        pm_setuid(proc_e, euid, ruid);

        m_out.m_type = VFS_PM_SETUID_REPLY;
        m_out.VFS_PM_ENDPT = proc_e;
    }
        break;

    case VFS_PM_SETGID:
    {
        endpoint_t proc_e;
        gid_t egid, rgid;

        proc_e = m_in.VFS_PM_ENDPT;
        egid = m_in.VFS_PM_EID;
        rgid = m_in.VFS_PM_RID;

        pm_setgid(proc_e, egid, rgid);

        m_out.m_type = VFS_PM_SETGID_REPLY;
        m_out.VFS_PM_ENDPT = proc_e;
    }
        break;

    case VFS_PM_SETSID:
    {
        endpoint_t proc_e;

        proc_e = m_in.VFS_PM_ENDPT;
        pm_setsid(proc_e);

        m_out.m_type = VFS_PM_SETSID_REPLY;
        m_out.VFS_PM_ENDPT = proc_e;
    }
        break;

    case VFS_PM_EXEC:
    case VFS_PM_EXIT:
    case VFS_PM_DUMPCORE:
    case VFS_PM_UNPAUSE:
    {
        endpoint_t proc_e = m_in.VFS_PM_ENDPT;

        if (isokendpt(proc_e, &slot) != OK) {
            printf("VFS: proc ep %d not ok\n", proc_e);
            return;
        }

        rfp = &fproc[slot];

        /* PM requests on behalf of a proc are handled after the system
         * call that might be in progress for that proc has finished. If
         * the proc is not busy, we start a new thread.
         */
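        /* A NULL function pointer makes this a postponed request: the
         * worker infrastructure is assumed to queue it on rfp's thread, so
         * that service_pm_postponed() above picks it up once any call in
         * progress for that process has finished.
         */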
739 */ 740 worker_start(rfp, NULL, &m_in, FALSE /*use_spare*/); 741 742 return; 743 } 744 case VFS_PM_FORK: 745 case VFS_PM_SRV_FORK: 746 { 747 endpoint_t pproc_e, proc_e; 748 pid_t child_pid; 749 uid_t reuid; 750 gid_t regid; 751 752 pproc_e = m_in.VFS_PM_PENDPT; 753 proc_e = m_in.VFS_PM_ENDPT; 754 child_pid = m_in.VFS_PM_CPID; 755 reuid = m_in.VFS_PM_REUID; 756 regid = m_in.VFS_PM_REGID; 757 758 pm_fork(pproc_e, proc_e, child_pid); 759 m_out.m_type = VFS_PM_FORK_REPLY; 760 761 if (call_nr == VFS_PM_SRV_FORK) { 762 m_out.m_type = VFS_PM_SRV_FORK_REPLY; 763 pm_setuid(proc_e, reuid, reuid); 764 pm_setgid(proc_e, regid, regid); 765 } 766 767 m_out.VFS_PM_ENDPT = proc_e; 768 } 769 break; 770 case VFS_PM_SETGROUPS: 771 { 772 endpoint_t proc_e; 773 int group_no; 774 gid_t *group_addr; 775 776 proc_e = m_in.VFS_PM_ENDPT; 777 group_no = m_in.VFS_PM_GROUP_NO; 778 group_addr = (gid_t *) m_in.VFS_PM_GROUP_ADDR; 779 780 pm_setgroups(proc_e, group_no, group_addr); 781 782 m_out.m_type = VFS_PM_SETGROUPS_REPLY; 783 m_out.VFS_PM_ENDPT = proc_e; 784 } 785 break; 786 787 case VFS_PM_REBOOT: 788 /* Reboot requests are not considered postponed PM work and are instead 789 * handled from a separate worker thread that is associated with PM's 790 * process. PM makes no regular VFS calls, and thus, from VFS's 791 * perspective, PM is always idle. Therefore, we can safely do this. 792 * We do assume that PM sends us only one VFS_PM_REBOOT message at 793 * once, or ever for that matter. :) 794 */ 795 worker_start(fproc_addr(PM_PROC_NR), pm_reboot, &m_in, 796 FALSE /*use_spare*/); 797 798 return; 799 800 default: 801 printf("VFS: don't know how to handle PM request %d\n", call_nr); 802 803 return; 804 } 805 806 r = ipc_send(PM_PROC_NR, &m_out); 807 if (r != OK) 808 panic("service_pm: ipc_send failed: %d", r); 809 } 810 811 812 /*===========================================================================* 813 * unblock * 814 *===========================================================================*/ 815 static int unblock(rfp) 816 struct fproc *rfp; 817 { 818 /* Unblock a process that was previously blocked on a pipe or a lock. This is 819 * done by reconstructing the original request and continuing/repeating it. 820 * This function returns TRUE when it has restored a request for execution, and 821 * FALSE if the caller should continue looking for work to do. 822 */ 823 int blocked_on; 824 825 blocked_on = rfp->fp_blocked_on; 826 827 /* Reconstruct the original request from the saved data. */ 828 memset(&m_in, 0, sizeof(m_in)); 829 m_in.m_source = rfp->fp_endpoint; 830 m_in.m_type = rfp->fp_block_callnr; 831 switch (m_in.m_type) { 832 case VFS_READ: 833 case VFS_WRITE: 834 assert(blocked_on == FP_BLOCKED_ON_PIPE); 835 m_in.m_lc_vfs_readwrite.fd = scratch(rfp).file.fd_nr; 836 m_in.m_lc_vfs_readwrite.buf = scratch(rfp).io.io_buffer; 837 m_in.m_lc_vfs_readwrite.len = scratch(rfp).io.io_nbytes; 838 break; 839 case VFS_FCNTL: 840 assert(blocked_on == FP_BLOCKED_ON_LOCK); 841 m_in.m_lc_vfs_fcntl.fd = scratch(rfp).file.fd_nr; 842 m_in.m_lc_vfs_fcntl.cmd = scratch(rfp).io.io_nbytes; 843 m_in.m_lc_vfs_fcntl.arg_ptr = scratch(rfp).io.io_buffer; 844 assert(m_in.m_lc_vfs_fcntl.cmd == F_SETLKW); 845 break; 846 default: 847 panic("unblocking call %d blocked on %d ??", m_in.m_type, blocked_on); 848 } 849 850 rfp->fp_blocked_on = FP_BLOCKED_ON_NONE; /* no longer blocked */ 851 rfp->fp_flags &= ~FP_REVIVED; 852 reviving--; 853 assert(reviving >= 0); 854 855 /* This should not be device I/O. If it is, it'll 'leak' grants. 
    assert(!GRANT_VALID(rfp->fp_grant));

    /* Pending pipe reads/writes cannot be repeated as is, and thus require
     * a special resumption procedure.
     */
    if (blocked_on == FP_BLOCKED_ON_PIPE) {
        worker_start(rfp, do_pending_pipe, &m_in, FALSE /*use_spare*/);
        return(FALSE); /* Retrieve more work */
    }

    /* A lock request. Repeat the original request as though it just came
     * in. */
    fp = rfp;
    return(TRUE); /* We've unblocked a process */
}