/* This file contains some utility routines for RS.
 *
 * Changes:
 *   Nov 22, 2009: Created (Cristiano Giuffrida)
 */

#include "inc.h"

#include <assert.h>
#include <minix/sched.h>
#include "kernel/proc.h"

#define PRINT_SEP() printf("---------------------------------------------------------------------------------\n")

/*===========================================================================*
 *                               init_service                               *
 *===========================================================================*/
int init_service(struct rproc *rp, int type, int flags)
{
  int r, prepare_state;
  message m;
  endpoint_t old_endpoint;

  rp->r_flags |= RS_INITIALIZING;              /* now initializing */
  rp->r_alive_tm = getticks();
  rp->r_check_tm = rp->r_alive_tm + 1;         /* expect reply within period */

  /* In case of RS initialization, we are done. */
  if(rp->r_priv.s_flags & ROOT_SYS_PROC) {
      return OK;
  }

  /* Determine the old endpoint if this is a new instance. */
  old_endpoint = NONE;
  prepare_state = SEF_LU_STATE_NULL;
  if(rp->r_old_rp) {
      old_endpoint = rp->r_upd.state_endpoint;
      prepare_state = rp->r_upd.prepare_state;
  }
  else if(rp->r_prev_rp) {
      old_endpoint = rp->r_prev_rp->r_pub->endpoint;
  }

  /* Check flags. */
  if(rp->r_pub->sys_flags & SF_USE_SCRIPT) {
      flags |= SEF_INIT_SCRIPT_RESTART;
  }

  /* Send initialization message. */
  m.m_type = RS_INIT;
  m.m_rs_init.type = (short) type;
  m.m_rs_init.flags = flags;
  m.m_rs_init.rproctab_gid = rinit.rproctab_gid;
  m.m_rs_init.old_endpoint = old_endpoint;
  m.m_rs_init.restarts = (short) rp->r_restarts+1;
  m.m_rs_init.buff_addr = rp->r_map_prealloc_addr;
  m.m_rs_init.buff_len = rp->r_map_prealloc_len;
  m.m_rs_init.prepare_state = prepare_state;
  rp->r_map_prealloc_addr = 0;
  rp->r_map_prealloc_len = 0;
  r = rs_asynsend(rp, &m, 0);

  return r;
}
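/* For reference, a hypothetical sketch (not part of RS; the callback name is
 * illustrative) of how a system service consumes the RS_INIT message sent
 * above. SEF delivers it to the init callback that the service registers
 * before calling sef_startup():
 *
 *   static int my_init_cb(int type, sef_init_info_t *info)
 *   {
 *       // type is SEF_INIT_FRESH, SEF_INIT_RESTART or SEF_INIT_LU;
 *       // info carries the old endpoint, restart count, prepare state, etc.
 *       return OK;
 *   }
 *
 *   // In the service's main(), before entering its message loop:
 *   sef_setcb_init_fresh(my_init_cb);
 *   sef_setcb_init_restart(my_init_cb);
 *   sef_startup();
 */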
/*===========================================================================*
 *                                fi_service                                *
 *===========================================================================*/
int fi_service(struct rproc *rp)
{
  message m;

  /* Send fault injection message. */
  m.m_type = COMMON_REQ_FI_CTL;
  m.m_lsys_fi_ctl.subtype = RS_FI_CRASH;
  return rs_asynsend(rp, &m, 0);
}

/*===========================================================================*
 *                              fill_send_mask                              *
 *===========================================================================*/
void fill_send_mask(
  sys_map_t *send_mask,         /* the send mask to fill in */
  int set_bits                  /* TRUE sets all bits, FALSE clears all bits */
)
{
/* Fill in a send mask. */
  int i;

  for (i = 0; i < NR_SYS_PROCS; i++) {
      if (set_bits)
          set_sys_bit(*send_mask, i);
      else
          unset_sys_bit(*send_mask, i);
  }
}

/*===========================================================================*
 *                              fill_call_mask                              *
 *===========================================================================*/
void fill_call_mask(
  int *calls,                   /* the unordered set of calls */
  int tot_nr_calls,             /* the total number of calls */
  bitchunk_t *call_mask,        /* the call mask to fill in */
  int call_base,                /* the base offset for the calls */
  int is_init                   /* set when initializing a call mask */
)
{
/* Fill a call mask from an unordered set of calls. */
  int i;
  int call_mask_size, nr_calls;

  call_mask_size = BITMAP_CHUNKS(tot_nr_calls);

  /* Count the number of calls to fill in. */
  nr_calls = 0;
  for(i=0; calls[i] != NULL_C; i++) {
      nr_calls++;
  }

  /* See if all calls are allowed and the call mask must be completely filled. */
  if(nr_calls == 1 && calls[0] == ALL_C) {
      for(i=0; i < call_mask_size; i++) {
          call_mask[i] = (~0);
      }
  }
  else {
      /* When initializing, reset the mask first. */
      if(is_init) {
          for(i=0; i < call_mask_size; i++) {
              call_mask[i] = 0;
          }
      }
      /* Enter calls bit by bit. */
      for(i=0; i < nr_calls; i++) {
          SET_BIT(call_mask, calls[i] - call_base);
      }
  }
}
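/* Usage sketch (illustrative only; the call list and the priv variable below
 * are made up): given a NULL_C-terminated list of kernel calls, fill the
 * per-process kernel call mask, offsetting each call number by KERNEL_CALL:
 *
 *   int calls[] = { SYS_FORK, SYS_EXEC, NULL_C };
 *   fill_call_mask(calls, NR_SYS_CALLS, priv.s_k_call_mask, KERNEL_CALL, TRUE);
 *
 * Passing a single ALL_C entry instead would set every bit in the mask.
 */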
/*===========================================================================*
 *                             srv_to_string_gen                            *
 *===========================================================================*/
char* srv_to_string_gen(struct rproc *rp, int is_verbose)
{
  struct rprocpub *rpub;
  int slot_nr;
  char *srv_string;
  static char srv_string_pool[3][RS_MAX_LABEL_LEN + 256];
  static int srv_string_pool_index = 0;

  rpub = rp->r_pub;
  slot_nr = rp - rproc;
  srv_string = srv_string_pool[srv_string_pool_index];
  srv_string_pool_index = (srv_string_pool_index + 1) % 3;

#define srv_str(cmd) ((cmd) == NULL || (cmd)[0] == '\0' ? "_" : (cmd))
#define srv_active_str(rp) ((rp)->r_flags & RS_ACTIVE ? "*" : " ")
#define srv_version_str(rp) ((rp)->r_new_rp || (rp)->r_next_rp ? "-" : \
    ((rp)->r_old_rp || (rp)->r_prev_rp ? "+" : " "))

  if(is_verbose) {
      sprintf(srv_string, "service '%s'%s%s(slot %d, ep %d, pid %d, cmd %s,"
          " script %s, proc %s, major %d, flags 0x%03x, sys_flags 0x%02x)",
          rpub->label, srv_active_str(rp), srv_version_str(rp),
          slot_nr, rpub->endpoint, rp->r_pid, srv_str(rp->r_cmd),
          srv_str(rp->r_script), srv_str(rpub->proc_name), rpub->dev_nr,
          rp->r_flags, rpub->sys_flags);
  }
  else {
      sprintf(srv_string, "service '%s'%s%s(slot %d, ep %d, pid %d)",
          rpub->label, srv_active_str(rp), srv_version_str(rp),
          slot_nr, rpub->endpoint, rp->r_pid);
  }

#undef srv_str
#undef srv_active_str
#undef srv_version_str

  return srv_string;
}

/*===========================================================================*
 *                             srv_upd_to_string                            *
 *===========================================================================*/
char* srv_upd_to_string(struct rprocupd *rpupd)
{
  static char srv_upd_string[256];
  struct rprocpub *rpub, *next_rpub, *prev_rpub;
  rpub = rpupd->rp ? rpupd->rp->r_pub : NULL;
  next_rpub = rpupd->next_rpupd && rpupd->next_rpupd->rp ? rpupd->next_rpupd->rp->r_pub : NULL;
  prev_rpub = rpupd->prev_rpupd && rpupd->prev_rpupd->rp ? rpupd->prev_rpupd->rp->r_pub : NULL;

#define srv_ep(RPUB) (RPUB ? (RPUB)->endpoint : -1)
#define srv_upd_luflag_c(F) (rpupd->lu_flags & F ? '1' : '0')
#define srv_upd_iflag_c(F) (rpupd->init_flags & F ? '1' : '0')

  sprintf(srv_upd_string, "update (lu_flags(SAMPNDRV)=%c%c%c%c%c%c%c%c,"
      " init_flags=(FCTD)=%c%c%c%c, state %d (%s), tm %lu, maxtime %lu,"
      " endpoint %d, state_data_gid %d, prev_ep %d, next_ep %d)",
      srv_upd_luflag_c(SEF_LU_SELF), srv_upd_luflag_c(SEF_LU_ASR),
      srv_upd_luflag_c(SEF_LU_MULTI), srv_upd_luflag_c(SEF_LU_PREPARE_ONLY),
      srv_upd_luflag_c(SEF_LU_NOMMAP), srv_upd_luflag_c(SEF_LU_DETACHED),
      srv_upd_luflag_c(SEF_LU_INCLUDES_RS),
      srv_upd_luflag_c(SEF_LU_INCLUDES_VM), srv_upd_iflag_c(SEF_INIT_FAIL),
      srv_upd_iflag_c(SEF_INIT_CRASH), srv_upd_iflag_c(SEF_INIT_TIMEOUT),
      srv_upd_iflag_c(SEF_INIT_DEFCB), rpupd->prepare_state,
      rpupd->prepare_state_data.eval_addr ? rpupd->prepare_state_data.eval_addr : "",
      rpupd->prepare_tm, rpupd->prepare_maxtime, srv_ep(rpub),
      rpupd->prepare_state_data_gid, srv_ep(prev_rpub), srv_ep(next_rpub));

  return srv_upd_string;
}

/*===========================================================================*
 *                               rs_asynsend                                *
 *===========================================================================*/
int rs_asynsend(struct rproc *rp, message *m_ptr, int no_reply)
{
  struct rprocpub *rpub;
  int r;

  rpub = rp->r_pub;

  if(no_reply) {
      r = asynsend3(rpub->endpoint, m_ptr, AMF_NOREPLY);
  }
  else {
      r = asynsend(rpub->endpoint, m_ptr);
  }

  if(rs_verbose)
      printf("RS: %s being asynsent to with message type %d, noreply=%d, result=%d\n",
          srv_to_string(rp), m_ptr->m_type, no_reply, r);

  return r;
}

/*===========================================================================*
 *                             rs_receive_ticks                             *
 *===========================================================================*/
int rs_receive_ticks(endpoint_t src, message *m_ptr,
    int *status_ptr, clock_t ticks)
{
/* IPC receive with timeout. Implemented with IPC filters. The timer
 * management logic comes from the tickdelay(3) implementation.
 */
  ipc_filter_el_t ipc_filter[2];
  clock_t time_left, uptime;
  int r, s, status;

  /* Use IPC filters to receive from the provided source and CLOCK only.
   * We make the hard assumption that RS did not already have IPC filters set.
   */
  memset(ipc_filter, 0, sizeof(ipc_filter));
  ipc_filter[0].flags = IPCF_MATCH_M_SOURCE;
  ipc_filter[0].m_source = CLOCK;
  ipc_filter[1].flags = IPCF_MATCH_M_SOURCE;
  ipc_filter[1].m_source = src;

  if ((s = sys_statectl(SYS_STATE_ADD_IPC_WL_FILTER, ipc_filter,
      sizeof(ipc_filter))) != OK)
      panic("RS: rs_receive_ticks: setting IPC filter failed: %d", s);

  /* Set a new alarm, and get information about the previous alarm. */
  if ((s = sys_setalarm2(ticks, FALSE, &time_left, &uptime)) != OK)
      panic("RS: rs_receive_ticks: setting alarm failed: %d", s);

  /* Receive a message from either the provided source or CLOCK. */
  while ((r = ipc_receive(ANY, m_ptr, &status)) == OK &&
      m_ptr->m_source == CLOCK) {
      /* Ignore early clock notifications. */
      if (m_ptr->m_type == NOTIFY_MESSAGE &&
          m_ptr->m_notify.timestamp >= uptime + ticks)
          break;
  }

  /* Reinstate the previous alarm, if any. Do this in any case. */
  if (time_left != TMR_NEVER) {
      if (time_left > ticks)
          time_left -= ticks;
      else
          time_left = 1; /* force an alarm */

      (void)sys_setalarm(time_left, FALSE);
  }

  /* Clear the IPC filters. */
  if ((s = sys_statectl(SYS_STATE_CLEAR_IPC_FILTERS, NULL, 0)) != OK)
      panic("RS: rs_receive_ticks: clearing IPC filters failed: %d", s);

  /* If the last received message was from CLOCK, we timed out. */
  if (r == OK && m_ptr->m_source == CLOCK)
      return ENOTREADY;

  if (status_ptr != NULL)
      *status_ptr = status;
  return r;
}
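/* Usage sketch (illustrative; the tick count is arbitrary): wait at most five
 * clock ticks for a message from a given service endpoint.
 *
 *   message m;
 *   int status;
 *   int r = rs_receive_ticks(rpub->endpoint, &m, &status, 5);
 *   if (r == ENOTREADY) {
 *       // Timed out: the CLOCK notification arrived before the reply.
 *   }
 */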
/*===========================================================================*
 *                                   reply                                  *
 *===========================================================================*/
void reply(
  endpoint_t who,               /* replyee */
  struct rproc *rp,             /* replyee slot (if any) */
  message *m_ptr                /* reply message */
)
{
  int r;                        /* send status */

  /* No need to actually reply to RS */
  if(who == RS_PROC_NR) {
      return;
  }

  if(rs_verbose && rp)
      printf("RS: %s being replied to with message type %d\n", srv_to_string(rp), m_ptr->m_type);

  r = ipc_sendnb(who, m_ptr);           /* send the message */
  if (r != OK)
      printf("RS: unable to send reply to %d: %d\n", who, r);
}

/*===========================================================================*
 *                                late_reply                                *
 *===========================================================================*/
void late_reply(
  struct rproc *rp,             /* pointer to process slot */
  int code                      /* status code */
)
{
  /* If a caller is waiting for a reply, unblock it. */
  if(rp->r_flags & RS_LATEREPLY) {
      message m;
      m.m_type = code;
      if(rs_verbose)
          printf("RS: %s late reply %d to %d for request %d\n",
              srv_to_string(rp), code, rp->r_caller, rp->r_caller_request);

      reply(rp->r_caller, NULL, &m);
      rp->r_flags &= ~RS_LATEREPLY;
  }
}

/*===========================================================================*
 *                               rs_isokendpt                               *
 *===========================================================================*/
int rs_isokendpt(endpoint_t endpoint, int *proc)
{
  *proc = _ENDPOINT_P(endpoint);
  if(*proc < -NR_TASKS || *proc >= NR_PROCS)
      return EINVAL;

  return OK;
}

/*===========================================================================*
 *                              sched_init_proc                             *
 *===========================================================================*/
int sched_init_proc(struct rproc *rp)
{
  int s;
  int is_usr_proc;

  /* Make sure user processes have no scheduler. PM deals with them. */
  is_usr_proc = !(rp->r_priv.s_flags & SYS_PROC);
  if(is_usr_proc) assert(rp->r_scheduler == NONE);
  if(!is_usr_proc) assert(rp->r_scheduler != NONE);

  /* Start scheduling for the given process. */
  if ((s = sched_start(rp->r_scheduler, rp->r_pub->endpoint,
      RS_PROC_NR, rp->r_priority, rp->r_quantum, rp->r_cpu,
      &rp->r_scheduler)) != OK) {
      return s;
  }

  return s;
}

/*===========================================================================*
 *                              update_sig_mgrs                             *
 *===========================================================================*/
int update_sig_mgrs(struct rproc *rp, endpoint_t sig_mgr,
    endpoint_t bak_sig_mgr)
{
  int r;
  struct rprocpub *rpub;

  rpub = rp->r_pub;

  if(rs_verbose)
      printf("RS: %s updates signal managers: %d%s / %d\n", srv_to_string(rp),
          sig_mgr == SELF ? rpub->endpoint : sig_mgr,
          sig_mgr == SELF ? "(SELF)" : "",
          bak_sig_mgr == NONE ? -1 : bak_sig_mgr);

  /* Synch privilege structure with the kernel. */
  if ((r = sys_getpriv(&rp->r_priv, rpub->endpoint)) != OK) {
      printf("RS: unable to synch privilege structure: %d\n", r);
      return r;
  }

  /* Set signal managers. */
  rp->r_priv.s_sig_mgr = sig_mgr;
  rp->r_priv.s_bak_sig_mgr = bak_sig_mgr;

  /* Update privilege structure. */
  r = sys_privctl(rpub->endpoint, SYS_PRIV_UPDATE_SYS, &rp->r_priv);
  if(r != OK) {
      printf("RS: unable to update privilege structure: %d\n", r);
      return r;
  }

  return OK;
}
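/* Call sketch (argument values are illustrative): update_sig_mgrs(rp, SELF, NONE)
 * stores SELF and NONE in the privilege structure unchanged; the verbose log
 * above merely prints the service's own endpoint when SELF is given.
 */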
/*===========================================================================*
 *                                rs_is_idle                                *
 *===========================================================================*/
int rs_is_idle(void)
{
  int slot_nr;
  struct rproc *rp;
  for (slot_nr = 0; slot_nr < NR_SYS_PROCS; slot_nr++) {
      rp = &rproc[slot_nr];
      if (!(rp->r_flags & RS_IN_USE)) {
          continue;
      }
      if(!RS_SRV_IS_IDLE(rp)) {
          return 0;
      }
  }
  return 1;
}

/*===========================================================================*
 *                              rs_idle_period                              *
 *===========================================================================*/
void rs_idle_period(void)
{
  struct rproc *rp;
  struct rprocpub *rpub;
  int r;

  /* Not much to do when RS is not idle. However, to avoid deadlocks it is
   * absolutely necessary that during system shutdown, dead services are
   * actually cleaned up, so override the idle check in that case.
   */
  if(!shutting_down && !rs_is_idle()) {
      return;
  }

  /* Clean up dead services. */
  for (rp=BEG_RPROC_ADDR; rp<END_RPROC_ADDR; rp++) {
      if((rp->r_flags & (RS_IN_USE|RS_DEAD)) == (RS_IN_USE|RS_DEAD)) {
          cleanup_service(rp);
      }
  }

  if (shutting_down) return;

  /* Create missing replicas when necessary. */
  for (rp=BEG_RPROC_ADDR; rp<END_RPROC_ADDR; rp++) {
      rpub = rp->r_pub;
      if((rp->r_flags & RS_ACTIVE) && (rpub->sys_flags & SF_USE_REPL) && rp->r_next_rp == NULL) {
          if(rpub->endpoint == VM_PROC_NR && (rp->r_old_rp || rp->r_new_rp)) {
              /* Only one replica at a time for VM. */
              continue;
          }
          if ((r = clone_service(rp, RST_SYS_PROC, 0)) != OK) {
              printf("RS: warning: unable to clone %s (error %d)\n",
                  srv_to_string(rp), r);
          }
      }
  }
}
/*===========================================================================*
 *                           print_services_status                          *
 *===========================================================================*/
void print_services_status(void)
{
  int slot_nr;
  struct rproc *rp;
  int num_services = 0;
  int num_service_instances = 0;
  int is_verbose = 1;

  PRINT_SEP();
  printf("Printing information about all the system service instances:\n");
  PRINT_SEP();
  for (slot_nr = 0; slot_nr < NR_SYS_PROCS; slot_nr++) {
      rp = &rproc[slot_nr];
      if (!(rp->r_flags & RS_IN_USE)) {
          continue;
      }
      if (rp->r_flags & RS_ACTIVE) {
          num_services++;
      }
      num_service_instances++;
      printf("%s\n", srv_to_string_gen(rp, is_verbose));
  }
  PRINT_SEP();
  printf("Found %d service instances, of which %d are active services\n",
      num_service_instances, num_services);
  PRINT_SEP();
}

/*===========================================================================*
 *                            print_update_status                           *
 *===========================================================================*/
void print_update_status(void)
{
  struct rprocupd *prev_rpupd, *rpupd;
  int is_updating = RUPDATE_IS_UPDATING();
  int i;

#define rupdate_flag_c(F) (rupdate.flags & F ? '1' : '0')

  if(!is_updating && !RUPDATE_IS_UPD_SCHEDULED()) {
      PRINT_SEP();
      printf("No update is in progress or scheduled\n");
      PRINT_SEP();
      return;
  }

  PRINT_SEP();
  i = 1;
  printf("A %s-component update is %s, flags(UIRV)=%c%c%c%c:\n",
      RUPDATE_IS_UPD_MULTI() ? "multi" : "single",
      is_updating ? "in progress" : "scheduled",
      rupdate_flag_c(RS_UPDATING), rupdate_flag_c(RS_INITIALIZING),
      rupdate.rs_rpupd ? '1' : '0', rupdate.vm_rpupd ? '1' : '0');
  PRINT_SEP();
  RUPDATE_ITER(rupdate.first_rpupd, prev_rpupd, rpupd,
      printf("%d. %s %s %s\n", i++, srv_to_string(rpupd->rp),
          is_updating ? "updating with" : "scheduled for",
          srv_upd_to_string(rpupd));
  );
  PRINT_SEP();

#undef rupdate_flag_c
}