#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/bitmap.h>
#include <minix/rs.h>
#include <minix/vfsif.h>

#include <sys/exec.h>

#include <libexec.h>
#include <ctype.h>
#include <errno.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <assert.h>

#define _MAIN 1
#include "glo.h"
#include "proto.h"
#include "util.h"
#include "vm.h"
#include "sanitycheck.h"

extern int missing_spares;

#include <machine/archtypes.h>
#include <sys/param.h>
#include "kernel/const.h"
#include "kernel/config.h"
#include "kernel/proc.h"

#include <signal.h>
#include <lib.h>

/* Table of calls and a macro to test for being in range. */
struct {
	int (*vmc_func)(message *);	/* Function that handles the call. */
	const char *vmc_name;		/* Human-readable string. */
} vm_calls[NR_VM_CALLS];

/* Macro to verify the call range and map the 'high' range to the 'base'
 * range (starting at 0) in one step. Evaluates to the zero-based call
 * number if the call number is valid, to -1 otherwise.
 */
#define CALLNUMBER(c) (((c) >= VM_RQ_BASE && \
			(c) < VM_RQ_BASE + ELEMENTS(vm_calls)) ? \
			((c) - VM_RQ_BASE) : -1)
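/* For example (illustrative only; the concrete offset of VM_MMAP within
 * the range is an assumption): if VM_MMAP were VM_RQ_BASE + 10, then
 *
 *	CALLNUMBER(VM_MMAP)				evaluates to 10
 *	CALLNUMBER(VM_RQ_BASE - 1)			evaluates to -1
 *	CALLNUMBER(VM_RQ_BASE + ELEMENTS(vm_calls))	evaluates to -1
 *
 * so a non-negative result can be used directly as an index into
 * vm_calls[].
 */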
static int map_service(struct rprocpub *rpub);

static struct rprocpub rprocpub[NR_SYS_PROCS];
int __vm_init_fresh;

/* SEF functions and variables. */
static void sef_local_startup(void);
static int sef_cb_init_lu_restart(int type, sef_init_info_t *info);
static int sef_cb_init_fresh(int type, sef_init_info_t *info);
static void sef_cb_signal_handler(int signo);

void init_vm(void);

int do_sef_init_request(message *);

/*===========================================================================*
 *				is_first_time				     *
 *===========================================================================*/
static int is_first_time(void)
{
	struct proc rs_proc;
	int r;

	if ((r = sys_getproc(&rs_proc, RS_PROC_NR)) != OK)
		panic("VM: couldn't get RS process data: %d", r);

	return RTS_ISSET(&rs_proc, RTS_BOOTINHIBIT);
}

/*===========================================================================*
 *				main					     *
 *===========================================================================*/
int main(void)
{
	message msg;
	int result, who_e, rcv_sts;
	int caller_slot;

	/* Initialize the system so that all processes are runnable the
	 * first time.
	 */
	if (is_first_time()) {
		init_vm();
		__vm_init_fresh = 1;
	}

	/* SEF local startup. */
	sef_local_startup();
	__vm_init_fresh = 0;

	SANITYCHECK(SCL_TOP);

	/* This is VM's main loop. */
	while (TRUE) {
		int r, c;
		int type;
		int transid = 0;	/* VFS transid if any */

		SANITYCHECK(SCL_TOP);
		if (missing_spares > 0) {
			alloc_cycle();	/* mem alloc code wants to be called */
		}

		if ((r = sef_receive_status(ANY, &msg, &rcv_sts)) != OK)
			panic("sef_receive_status() error: %d", r);

		if (is_ipc_notify(rcv_sts)) {
			/* Unexpected ipc_notify(). */
			printf("VM: ignoring ipc_notify() from %d\n",
				msg.m_source);
			continue;
		}
		who_e = msg.m_source;
		if (vm_isokendpt(who_e, &caller_slot) != OK)
			panic("invalid caller %d", who_e);

		/* We depend on the initial value of transid not looking
		 * like a valid VFS transaction id.
		 */
		assert(!IS_VFS_FS_TRANSID(transid));

		type = msg.m_type;
		c = CALLNUMBER(type);
		result = ENOSYS; /* Out of range or restricted calls return this. */

		transid = TRNS_GET_ID(msg.m_type);

		if ((msg.m_source == VFS_PROC_NR) &&
				IS_VFS_FS_TRANSID(transid)) {
			/* If it's a request from VFS, it might carry a
			 * transaction id.
			 */
			msg.m_type = TRNS_DEL_ID(msg.m_type);

			/* Calls that use the transid */
			result = do_procctl(&msg, transid);
		} else if (msg.m_type == RS_INIT &&
				msg.m_source == RS_PROC_NR) {
			result = do_sef_init_request(&msg);
			if (result != OK)
				panic("do_sef_init_request failed!\n");
			result = SUSPEND;	/* do not reply to RS */
		} else if (msg.m_type == VM_PAGEFAULT) {
			if (!IPC_STATUS_FLAGS_TEST(rcv_sts,
					IPC_FLG_MSG_FROM_KERNEL)) {
				printf("VM: process %d faked VM_PAGEFAULT "
					"message!\n", msg.m_source);
			}
			do_pagefaults(&msg);
			/* Do not reply to this call; on success the caller
			 * is unblocked by a sys_vmctl() call in
			 * do_pagefaults(). VM panics otherwise.
			 */
			continue;
		} else if (c < 0 || !vm_calls[c].vmc_func) {
			/* out of range or missing callnr */
		} else {
			if (acl_check(&vmproc[caller_slot], c) != OK) {
				printf("VM: unauthorized %s by %d\n",
					vm_calls[c].vmc_name, who_e);
			} else {
				SANITYCHECK(SCL_FUNCTIONS);
				result = vm_calls[c].vmc_func(&msg);
				SANITYCHECK(SCL_FUNCTIONS);
			}
		}

		/* Send a reply message, unless the return code is SUSPEND,
		 * which is a pseudo-result suppressing the reply message.
		 */
		if (result != SUSPEND) {
			msg.m_type = result;

			assert(!IS_VFS_FS_TRANSID(transid));

			if ((r = ipc_send(who_e, &msg)) != OK) {
				printf("VM: couldn't send %d to %d (err %d)\n",
					msg.m_type, who_e, r);
				panic("ipc_send() error");
			}
		}
	}
	return(OK);
}

static void sef_cb_lu_state_changed(int old_state, int state)
{
	/* Called whenever the live-update state changes. We need to restore
	 * certain state in the old VM instance after a live update has
	 * failed, because some but not all memory is shared between the two
	 * VM instances.
	 */
	struct vmproc *vmp;

	if (state == SEF_LU_STATE_NULL) {
		/* Undo some of the changes that may have been made by the
		 * new VM instance. If the new VM instance is us, nothing
		 * happens.
		 */
		vmp = &vmproc[VM_PROC_NR];

		/* Rebind page tables. */
		pt_bind(&vmp->vm_pt, vmp);
		pt_clearmapcache();

		/* Readjust process references. */
		adjust_proc_refs();
	}
}

static void sef_local_startup(void)
{
	/* Register init callbacks. */
	sef_setcb_init_fresh(sef_cb_init_fresh);
	sef_setcb_init_lu(sef_cb_init_lu_restart);
	sef_setcb_init_restart(sef_cb_init_lu_restart);
	/* In order to avoid a deadlock at boot time, send the first RS_INIT
	 * reply to RS asynchronously. After that, use sendrec as usual.
	 */
	if (__vm_init_fresh)
		sef_setcb_init_response(sef_cb_init_response_rs_asyn_once);

	/* Register live update callbacks. */
	sef_setcb_lu_state_changed(sef_cb_lu_state_changed);

	/* Register signal callbacks. */
	sef_setcb_signal_handler(sef_cb_signal_handler);

	/* Let SEF perform startup. */
	sef_startup();
}

static int sef_cb_init_fresh(int type, sef_init_info_t *info)
{
	int s, i;

	/* Map all the services in the boot image. */
	if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
		(vir_bytes) rprocpub, sizeof(rprocpub))) != OK) {
		panic("vm: sys_safecopyfrom (rs) failed: %d", s);
	}

	for (i = 0; i < NR_BOOT_PROCS; i++) {
		if (rprocpub[i].in_use) {
			if ((s = map_service(&rprocpub[i])) != OK) {
				panic("unable to map service: %d", s);
			}
		}
	}

	return(OK);
}

static struct vmproc *init_proc(endpoint_t ep_nr)
{
	struct boot_image *ip;

	for (ip = &kernel_boot_info.boot_procs[0];
		ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {
		struct vmproc *vmp;

		if (ip->proc_nr != ep_nr) continue;

		if (ip->proc_nr >= _NR_PROCS || ip->proc_nr < 0)
			panic("proc: %d", ip->proc_nr);

		vmp = &vmproc[ip->proc_nr];
		assert(!(vmp->vm_flags & VMF_INUSE));	/* no double procs */
		clear_proc(vmp);
		vmp->vm_flags = VMF_INUSE;
		vmp->vm_endpoint = ip->endpoint;
		vmp->vm_boot = ip;

		return vmp;
	}

	panic("no init_proc");
}

struct vm_exec_info {
	struct exec_info execi;
	struct boot_image *ip;
	struct vmproc *vmp;
};

static int libexec_copy_physcopy(struct exec_info *execi,
	off_t off, vir_bytes vaddr, size_t len)
{
	vir_bytes end;
	struct vm_exec_info *ei = execi->opaque;
	end = ei->ip->start_addr + ei->ip->len;
	assert(ei->ip->start_addr + off + len <= end);
	return sys_physcopy(NONE, ei->ip->start_addr + off,
		execi->proc_e, vaddr, len, 0);
}

static void boot_alloc(struct exec_info *execi, off_t vaddr,
	size_t len, int flags)
{
	struct vmproc *vmp = ((struct vm_exec_info *) execi->opaque)->vmp;

	if (!(map_page_region(vmp, vaddr, 0, len,
		VR_ANON | VR_WRITABLE | VR_UNINITIALIZED, flags,
		&mem_type_anon))) {
		panic("VM: exec: map_page_region for boot process failed");
	}
}

static int libexec_alloc_vm_prealloc(struct exec_info *execi,
	vir_bytes vaddr, size_t len)
{
	boot_alloc(execi, vaddr, len, MF_PREALLOC);
	return OK;
}

static int libexec_alloc_vm_ondemand(struct exec_info *execi,
	vir_bytes vaddr, size_t len)
{
	boot_alloc(execi, vaddr, len, 0);
	return OK;
}
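/* Together these helpers implement the libexec callback contract for
 * loading a boot process directly from the boot image: copymem fetches
 * file data with sys_physcopy() rather than from a file system, and the
 * two allocmem variants map anonymous writable regions, either populated
 * up front (MF_PREALLOC) or on demand at the first page fault.
 */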
static void exec_bootproc(struct vmproc *vmp, struct boot_image *ip)
{
	struct vm_exec_info vmexeci;
	struct exec_info *execi = &vmexeci.execi;
	char hdr[VM_PAGE_SIZE];

	size_t frame_size = 0;	/* Size of the new initial stack. */
	int argc = 0;		/* Argument count. */
	int envc = 0;		/* Environment count. */
	char overflow = 0;	/* No overflow yet. */
	struct ps_strings *psp;

	int vsp = 0;	/* (virtual) Stack pointer in new address space. */
	char *argv[] = { ip->proc_name, NULL };
	char *envp[] = { NULL };
	char *path = ip->proc_name;
	char frame[VM_PAGE_SIZE];

	memset(&vmexeci, 0, sizeof(vmexeci));

	if (pt_new(&vmp->vm_pt) != OK)
		panic("VM: no new pagetable");

	if (pt_bind(&vmp->vm_pt, vmp) != OK)
		panic("VM: pt_bind failed");

	if (sys_physcopy(NONE, ip->start_addr, SELF,
		(vir_bytes) hdr, sizeof(hdr), 0) != OK)
		panic("can't look at boot proc header");

	execi->stack_high = kernel_boot_info.user_sp;
	execi->stack_size = DEFAULT_STACK_LIMIT;
	execi->proc_e = vmp->vm_endpoint;
	execi->hdr = hdr;
	execi->hdr_len = sizeof(hdr);
	strlcpy(execi->progname, ip->proc_name, sizeof(execi->progname));
	execi->frame_len = 0;
	execi->opaque = &vmexeci;
	execi->filesize = ip->len;

	vmexeci.ip = ip;
	vmexeci.vmp = vmp;

	/* callback functions and data */
	execi->copymem = libexec_copy_physcopy;
	execi->clearproc = NULL;
	execi->clearmem = libexec_clear_sys_memset;
	execi->allocmem_prealloc_junk = libexec_alloc_vm_prealloc;
	execi->allocmem_prealloc_cleared = libexec_alloc_vm_prealloc;
	execi->allocmem_ondemand = libexec_alloc_vm_ondemand;

	if (libexec_load_elf(execi) != OK)
		panic("vm: boot process load of process %s (ep=%d) failed\n",
			execi->progname, vmp->vm_endpoint);

	/* Set up a minimal stack. */
	minix_stack_params(path, argv, envp, &frame_size, &overflow, &argc,
		&envc);

	/* The party is off if there is an overflow, or the frame is too big
	 * for our pre-allocated space.
	 */
	if (overflow || frame_size > sizeof(frame))
		panic("vm: could not alloc stack for boot process %s (ep=%d)\n",
			execi->progname, vmp->vm_endpoint);

	minix_stack_fill(path, argc, argv, envc, envp, frame_size, frame,
		&vsp, &psp);

	if (handle_memory_once(vmp, vsp, frame_size, 1) != OK)
		panic("vm: could not map stack for boot process %s (ep=%d)\n",
			execi->progname, vmp->vm_endpoint);

	if (sys_datacopy(SELF, (vir_bytes) frame, vmp->vm_endpoint, vsp,
		frame_size) != OK)
		panic("vm: could not copy stack for boot process %s (ep=%d)\n",
			execi->progname, vmp->vm_endpoint);

	if (sys_exec(vmp->vm_endpoint, (vir_bytes) vsp,
		(vir_bytes) execi->progname, execi->pc,
		vsp + ((int) psp - (int) frame)) != OK)
		panic("vm: boot process exec of process %s (ep=%d) failed\n",
			execi->progname, vmp->vm_endpoint);

	/* Make it runnable. */
	if (sys_vmctl(vmp->vm_endpoint, VMCTL_BOOTINHIBIT_CLEAR, 0) != OK)
		panic("VMCTL_BOOTINHIBIT_CLEAR failed");
}

static int do_procctl_notrans(message *msg)
{
	int transid = 0;

	assert(!IS_VFS_FS_TRANSID(transid));

	return do_procctl(msg, transid);
}
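/* A sketch of the VFS transaction-id convention relied on here and in the
 * main loop (illustrative): VFS encodes a transaction id in the upper bits
 * of m_type, so a tagged request is decoded as
 *
 *	transid = TRNS_GET_ID(msg.m_type);
 *	msg.m_type = TRNS_DEL_ID(msg.m_type);
 *
 * do_procctl_notrans() is the call-table entry point for requests that
 * arrive without such an id; the main loop calls do_procctl() directly
 * for transaction-tagged VFS requests.
 */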
void init_vm(void)
{
	int s, i;
	static struct memory mem_chunks[NR_MEMS];
	struct boot_image *ip;
	extern void __minix_init(void);
	multiboot_module_t *mod;
	vir_bytes kern_dyn, kern_static;

#if SANITYCHECKS
	incheck = nocheck = 0;
#endif

	/* Retrieve various crucial boot parameters. */
	if (OK != (s = sys_getkinfo(&kernel_boot_info))) {
		panic("couldn't get bootinfo: %d", s);
	}

	/* Turn file mmap on? */
	enable_filemap = 1;	/* yes by default */
	env_parse("filemap", "d", 0, &enable_filemap, 0, 1);

	/* Sanity check. */
	assert(kernel_boot_info.mmap_size > 0);
	assert(kernel_boot_info.mods_with_kernel > 0);

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	for (i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* Initialize ACL data structures. */
	acl_init();

	/* Region management initialization. */
	map_region_init();

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);

	/* Architecture-dependent initialization. */
	init_proc(VM_PROC_NR);
	pt_init();

	/* Acquire kernel ipc vectors that weren't available
	 * before VM had determined kernel mappings.
	 */
	__minix_init();

	/* The kernel's freelist does not include boot-time modules; let
	 * the allocator know that the total memory is bigger.
	 */
	for (mod = &kernel_boot_info.module_list[0];
		mod < &kernel_boot_info.module_list[
			kernel_boot_info.mods_with_kernel-1]; mod++) {
		phys_bytes len = mod->mod_end - mod->mod_start + 1;
		len = roundup(len, VM_PAGE_SIZE);
		mem_add_total_pages(len / VM_PAGE_SIZE);
	}

	kern_dyn = kernel_boot_info.kernel_allocated_bytes_dynamic;
	kern_static = kernel_boot_info.kernel_allocated_bytes;
	kern_static = roundup(kern_static, VM_PAGE_SIZE);
	mem_add_total_pages((kern_dyn + kern_static) / VM_PAGE_SIZE);

	/* Give these processes their own page table. */
	for (ip = &kernel_boot_info.boot_procs[0];
		ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {
		struct vmproc *vmp;

		if (ip->proc_nr < 0) continue;

		assert(ip->start_addr);

		/* VM has already been set up by the kernel and pt_init().
		 * Any other boot process is already in memory and is set up
		 * here.
		 */
		if (ip->proc_nr == VM_PROC_NR) continue;

		vmp = init_proc(ip->proc_nr);

		exec_bootproc(vmp, ip);

		/* Free the file blob. */
		assert(!(ip->start_addr % VM_PAGE_SIZE));
		ip->len = roundup(ip->len, VM_PAGE_SIZE);
		free_mem(ABS2CLICK(ip->start_addr), ABS2CLICK(ip->len));
	}

	/* Set up the table of calls. */
#define CALLMAP(code, func) { int _cmi; \
	_cmi = CALLNUMBER(code); \
	assert(_cmi >= 0); \
	assert(_cmi < NR_VM_CALLS); \
	vm_calls[_cmi].vmc_func = (func); \
	vm_calls[_cmi].vmc_name = #code; \
	}

	/* Set the call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));
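	/* For reference, a single mapping such as CALLMAP(VM_MMAP, do_mmap)
	 * expands (modulo the range asserts) to:
	 *
	 *	vm_calls[CALLNUMBER(VM_MMAP)].vmc_func = do_mmap;
	 *	vm_calls[CALLNUMBER(VM_MMAP)].vmc_name = "VM_MMAP";
	 */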
	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_munmap);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	CALLMAP(VM_PROCCTL, do_procctl_notrans);

	/* Calls from VFS. */
	CALLMAP(VM_VFS_REPLY, do_vfs_reply);
	CALLMAP(VM_VFS_MMAP, do_vfs_mmap);

	/* Calls from RS. */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);
	CALLMAP(VM_RS_PREPARE, do_rs_prepare);
	CALLMAP(VM_RS_UPDATE, do_rs_update);
	CALLMAP(VM_RS_MEMCTL, do_rs_memctl);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_REMAP_RO, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_munmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);
	CALLMAP(VM_WATCH_EXIT, do_watch_exit);

	/* Cache blocks. */
	CALLMAP(VM_MAPCACHEPAGE, do_mapcache);
	CALLMAP(VM_SETCACHEPAGE, do_setcache);
	CALLMAP(VM_FORGETCACHEPAGE, do_forgetcache);
	CALLMAP(VM_CLEARCACHE, do_clearcache);

	/* getrusage */
	CALLMAP(VM_GETRUSAGE, do_getrusage);

	/* Initialize the structures for queryexit. */
	init_query_exit();

	/* Mark VM instances. */
	num_vm_instances = 1;
	vmproc[VM_PROC_NR].vm_flags |= VMF_VM_INSTANCE;

	/* Let SEF know about VM's mmapped regions. */
	s = sef_llvm_add_special_mem_region((void *) VM_OWN_HEAPBASE,
		VM_OWN_MMAPTOP - VM_OWN_HEAPBASE, "%MMAP_ALL");
	if (s < 0) {
		printf("VM: sef_llvm_add_special_mem_region failed %d\n", s);
	}
}
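/* A minimal sketch of the IPC filter mechanism used below (illustrative):
 * a single whitelist element that admits only messages from RS would be
 * set up as
 *
 *	ipc_filter_el_t el;
 *	memset(&el, 0, sizeof(el));
 *	el.flags = IPCF_MATCH_M_SOURCE;
 *	el.m_source = RS_PROC_NR;
 *	sys_statectl(SYS_STATE_ADD_IPC_WL_FILTER, &el, sizeof(el));
 *
 * The multi-component update code builds a larger array of such elements,
 * several per service being updated.
 */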
/*===========================================================================*
 *			  sef_cb_init_vm_multi_lu			     *
 *===========================================================================*/
static int sef_cb_init_vm_multi_lu(int type, sef_init_info_t *info)
{
	message m;
	int i, r;
	ipc_filter_el_t ipc_filter[IPCF_MAX_ELEMENTS];
	int num_elements;

	if (type != SEF_INIT_LU || !(info->flags & SEF_LU_MULTI)) {
		return OK;
	}

	/* If this is a multi-component update, we need to perform the update
	 * for services that need to be updated. In addition, make sure VM
	 * can only receive messages from RS, tasks, and other services being
	 * updated until RS specifically sends a special update cancel
	 * message. This is necessary to limit the number of VM state changes
	 * to support rollback. Allow only safe message types for safe
	 * updates.
	 */
	memset(ipc_filter, 0, sizeof(ipc_filter));
	num_elements = 0;
	ipc_filter[num_elements].flags = IPCF_MATCH_M_SOURCE;
	ipc_filter[num_elements++].m_source = RS_PROC_NR;
	if ((r = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
		(vir_bytes) rprocpub,
		NR_SYS_PROCS * sizeof(struct rprocpub))) != OK) {
		panic("sys_safecopyfrom failed: %d", r);
	}
	m.m_source = VM_PROC_NR;
	for (i = 0; i < NR_SYS_PROCS; i++) {
		if (rprocpub[i].in_use && rprocpub[i].old_endpoint != NONE) {
			if (num_elements <= IPCF_MAX_ELEMENTS - 5) {
				/* VM_BRK is needed for normal operation
				 * during the live update. VM_INFO is needed
				 * for state transfer in the light of holes.
				 * Pagefaults and handle-memory requests are
				 * blocked intentionally, as handling these
				 * would prevent VM from being able to roll
				 * back.
				 */
				ipc_filter[num_elements].flags =
					IPCF_MATCH_M_SOURCE | IPCF_MATCH_M_TYPE;
				ipc_filter[num_elements].m_source =
					rprocpub[i].old_endpoint;
				ipc_filter[num_elements++].m_type = VM_BRK;
				ipc_filter[num_elements].flags =
					IPCF_MATCH_M_SOURCE | IPCF_MATCH_M_TYPE;
				ipc_filter[num_elements].m_source =
					rprocpub[i].new_endpoint;
				ipc_filter[num_elements++].m_type = VM_BRK;
				ipc_filter[num_elements].flags =
					IPCF_MATCH_M_SOURCE | IPCF_MATCH_M_TYPE;
				ipc_filter[num_elements].m_source =
					rprocpub[i].old_endpoint;
				ipc_filter[num_elements++].m_type = VM_INFO;
				ipc_filter[num_elements].flags =
					IPCF_MATCH_M_SOURCE | IPCF_MATCH_M_TYPE;
				ipc_filter[num_elements].m_source =
					rprocpub[i].new_endpoint;
				ipc_filter[num_elements++].m_type = VM_INFO;
				/* Make sure we can talk to any RS instance. */
				if (rprocpub[i].old_endpoint == RS_PROC_NR) {
					ipc_filter[num_elements].flags =
						IPCF_MATCH_M_SOURCE;
					ipc_filter[num_elements++].m_source =
						rprocpub[i].new_endpoint;
				} else if (rprocpub[i].new_endpoint ==
						RS_PROC_NR) {
					ipc_filter[num_elements].flags =
						IPCF_MATCH_M_SOURCE;
					ipc_filter[num_elements++].m_source =
						rprocpub[i].old_endpoint;
				}
			} else {
				printf("sef_cb_init_vm_multi_lu: skipping "
					"ipc filter elements for %d and %d\n",
					rprocpub[i].old_endpoint,
					rprocpub[i].new_endpoint);
			}
			if (rprocpub[i].sys_flags & SF_VM_UPDATE) {
				m.m_lsys_vm_update.src =
					rprocpub[i].new_endpoint;
				m.m_lsys_vm_update.dst =
					rprocpub[i].old_endpoint;
				m.m_lsys_vm_update.flags =
					rprocpub[i].sys_flags;
				r = do_rs_update(&m);
				if (r != OK && r != SUSPEND) {
					printf("sef_cb_init_vm_multi_lu: "
						"do_rs_update failed: %d\n", r);
				}
			}
		}
	}

	r = sys_statectl(SYS_STATE_ADD_IPC_WL_FILTER, ipc_filter,
		num_elements * sizeof(ipc_filter_el_t));
	if (r != OK) {
		printf("sef_cb_init_vm_multi_lu: sys_statectl failed: %d\n", r);
	}

	return OK;
}

/*===========================================================================*
 *			   sef_cb_init_lu_restart			     *
 *===========================================================================*/
static int sef_cb_init_lu_restart(int type, sef_init_info_t *info)
{
	/* Restart the vm server. */
	int r;
	endpoint_t old_e;
	int old_p;
	struct vmproc *old_vmp, *new_vmp;

	/* Perform the default state transfer first. */
	if (type == SEF_INIT_LU) {
		sef_setcb_init_restart(SEF_CB_INIT_RESTART_STATEFUL);
		r = SEF_CB_INIT_LU_DEFAULT(type, info);
	} else {
		r = SEF_CB_INIT_RESTART_STATEFUL(type, info);
	}
	if (r != OK) {
		return r;
	}

	/* Look up the slot of the old process. */
	old_e = info->old_endpoint;
	if (vm_isokendpt(old_e, &old_p) != OK) {
		printf("sef_cb_init_lu_restart: bad old endpoint %d\n", old_e);
		return EINVAL;
	}
	old_vmp = &vmproc[old_p];
	new_vmp = &vmproc[VM_PROC_NR];

	/* Swap the proc slots and dynamic data. */
	if ((r = swap_proc_slot(old_vmp, new_vmp)) != OK) {
		printf("sef_cb_init_lu_restart: swap_proc_slot failed\n");
		return r;
	}
	if ((r = swap_proc_dyn_data(old_vmp, new_vmp, 0)) != OK) {
		printf("sef_cb_init_lu_restart: swap_proc_dyn_data failed\n");
		return r;
	}

	/* Rebind the page tables. */
	pt_bind(&new_vmp->vm_pt, new_vmp);
	pt_bind(&old_vmp->vm_pt, old_vmp);
	pt_clearmapcache();

	/* Adjust process references. */
	adjust_proc_refs();

	/* Handle a multi-component live update when necessary. */
	return sef_cb_init_vm_multi_lu(type, info);
}
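/* Note on the swap above (a sketch of the assumed slot layout): after a
 * restart or failed live update, the new VM instance runs in the slot
 * indexed by VM_PROC_NR while the old instance's state still lives in the
 * slot of info->old_endpoint; swapping the two slots and rebinding both
 * page tables makes the hardware view consistent with the new owner of
 * each slot.
 */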
/*===========================================================================*
 *			    sef_cb_signal_handler			     *
 *===========================================================================*/
static void sef_cb_signal_handler(int signo)
{
	/* Check for known kernel signals, ignore anything else. */
	switch (signo) {
	/* There is a pending memory request from the kernel. */
	case SIGKMEM:
		do_memory();
		break;
	}

	/* It can happen that we get stuck receiving signals without
	 * sef_receive() returning. We may need more memory, though.
	 */
	if (missing_spares > 0) {
		alloc_cycle();	/* pagetable code wants to be called */
	}

	pt_clearmapcache();
}

/*===========================================================================*
 *				map_service				     *
 *===========================================================================*/
static int map_service(struct rprocpub *rpub)
{
	/* Map a new service by initializing its call mask. */
	int r, proc_nr;

	if ((r = vm_isokendpt(rpub->endpoint, &proc_nr)) != OK) {
		return r;
	}

	/* Copy the call mask. */
	acl_set(&vmproc[proc_nr], rpub->vm_call_mask, !IS_RPUB_BOOT_USR(rpub));

	return(OK);
}