/* This file contains some utility routines for RS.
 *
 * Changes:
 *   Nov 22, 2009: Created  (Cristiano Giuffrida)
 */

#include "inc.h"

#include <assert.h>
#include <minix/sched.h>
#include "kernel/proc.h"

#define PRINT_SEP() printf("---------------------------------------------------------------------------------\n")

/*===========================================================================*
 *				init_service				     *
 *===========================================================================*/
int init_service(struct rproc *rp, int type, int flags)
{
  int r;
  message m;
  endpoint_t old_endpoint;

  rp->r_flags |= RS_INITIALIZING;              /* now initializing */
  getticks(&rp->r_alive_tm);
  rp->r_check_tm = rp->r_alive_tm + 1;         /* expect reply within period */

  /* In case of RS initialization, we are done. */
  if(rp->r_priv.s_flags & ROOT_SYS_PROC) {
      return OK;
  }

  /* Determine the old endpoint if this is a new instance. */
  old_endpoint = NONE;
  if(rp->r_old_rp) {
      old_endpoint = rp->r_upd.state_endpoint;
  }
  else if(rp->r_prev_rp) {
      old_endpoint = rp->r_prev_rp->r_pub->endpoint;
  }

  /* Check flags. */
  if(rp->r_pub->sys_flags & SF_USE_SCRIPT) {
      flags |= SEF_INIT_SCRIPT_RESTART;
  }

  /* Send initialization message. */
  m.m_type = RS_INIT;
  m.m_rs_init.type = (short) type;
  m.m_rs_init.flags = flags;
  m.m_rs_init.rproctab_gid = rinit.rproctab_gid;
  m.m_rs_init.old_endpoint = old_endpoint;
  m.m_rs_init.restarts = (short) rp->r_restarts+1;
  m.m_rs_init.buff_addr = rp->r_map_prealloc_addr;
  m.m_rs_init.buff_len = rp->r_map_prealloc_len;
  rp->r_map_prealloc_addr = 0;
  rp->r_map_prealloc_len = 0;
  r = rs_asynsend(rp, &m, 0);

  return r;
}

/*===========================================================================*
 *				 fi_service				     *
 *===========================================================================*/
int fi_service(struct rproc *rp)
{
  message m;

  /* Send fault injection message. */
  m.m_type = COMMON_REQ_FI_CTL;
  m.m_lsys_fi_ctl.subtype = RS_FI_CRASH;
  return rs_asynsend(rp, &m, 0);
}

/*===========================================================================*
 *			      fill_send_mask				     *
 *===========================================================================*/
void fill_send_mask(send_mask, set_bits)
sys_map_t *send_mask;		/* the send mask to fill in */
int set_bits;			/* TRUE sets all bits, FALSE clears all bits */
{
/* Fill in a send mask. */
  int i;

  for (i = 0; i < NR_SYS_PROCS; i++) {
      if (set_bits)
          set_sys_bit(*send_mask, i);
      else
          unset_sys_bit(*send_mask, i);
  }
}

/*===========================================================================*
 *			      fill_call_mask				     *
 *===========================================================================*/
void fill_call_mask(calls, tot_nr_calls, call_mask, call_base, is_init)
int *calls;			/* the unordered set of calls */
int tot_nr_calls;		/* the total number of calls */
bitchunk_t *call_mask;		/* the call mask to fill in */
int call_base;			/* the base offset for the calls */
int is_init;			/* set when initializing a call mask */
{
/* Fill a call mask from an unordered set of calls. */
  int i;
  int call_mask_size, nr_calls;

  call_mask_size = BITMAP_CHUNKS(tot_nr_calls);
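  /* The mask is an array of bitchunk_t words; call number C maps to bit
   * (C - call_base) in that array, set with SET_BIT() below.
   */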
  /* Count the number of calls to fill in. */
  nr_calls = 0;
  for(i=0; calls[i] != NULL_C; i++) {
      nr_calls++;
  }

  /* See if all calls are allowed and call mask must be completely filled. */
  if(nr_calls == 1 && calls[0] == ALL_C) {
      for(i=0; i < call_mask_size; i++) {
          call_mask[i] = (~0);
      }
  }
  else {
      /* When initializing, reset the mask first. */
      if(is_init) {
          for(i=0; i < call_mask_size; i++) {
              call_mask[i] = 0;
          }
      }
      /* Enter calls bit by bit. */
      for(i=0; i < nr_calls; i++) {
          SET_BIT(call_mask, calls[i] - call_base);
      }
  }
}

/*===========================================================================*
 *			     srv_to_string_gen				     *
 *===========================================================================*/
char* srv_to_string_gen(struct rproc *rp, int is_verbose)
{
  struct rprocpub *rpub;
  int slot_nr;
  char *srv_string;
  static char srv_string_pool[3][RS_MAX_LABEL_LEN + 256];
  static int srv_string_pool_index = 0;

  rpub = rp->r_pub;
  slot_nr = rp - rproc;
  srv_string = srv_string_pool[srv_string_pool_index];
  srv_string_pool_index = (srv_string_pool_index + 1) % 3;

#define srv_str(cmd) ((cmd) == NULL || (cmd)[0] == '\0' ? "_" : (cmd))
#define srv_active_str(rp) ((rp)->r_flags & RS_ACTIVE ? "*" : " ")
#define srv_version_str(rp) ((rp)->r_new_rp || (rp)->r_next_rp ? "-" : \
    ((rp)->r_old_rp || (rp)->r_prev_rp ? "+" : " "))

  if(is_verbose) {
      sprintf(srv_string, "service '%s'%s%s(slot %d, ep %d, pid %d, cmd %s, script %s, proc %s, major %d, flags 0x%03x, sys_flags 0x%02x)",
          rpub->label, srv_active_str(rp), srv_version_str(rp),
          slot_nr, rpub->endpoint, rp->r_pid, srv_str(rp->r_cmd),
          srv_str(rp->r_script), srv_str(rpub->proc_name), rpub->dev_nr,
          rp->r_flags, rpub->sys_flags);
  }
  else {
      sprintf(srv_string, "service '%s'%s%s(slot %d, ep %d, pid %d)",
          rpub->label, srv_active_str(rp), srv_version_str(rp),
          slot_nr, rpub->endpoint, rp->r_pid);
  }

#undef srv_str
#undef srv_active_str
#undef srv_version_str

  return srv_string;
}

/*===========================================================================*
 *			     srv_upd_to_string				     *
 *===========================================================================*/
char* srv_upd_to_string(struct rprocupd *rpupd)
{
  static char srv_upd_string[256];
  struct rprocpub *rpub, *next_rpub, *prev_rpub;
  rpub = rpupd->rp ? rpupd->rp->r_pub : NULL;
  next_rpub = rpupd->next_rpupd && rpupd->next_rpupd->rp ? rpupd->next_rpupd->rp->r_pub : NULL;
  prev_rpub = rpupd->prev_rpupd && rpupd->prev_rpupd->rp ? rpupd->prev_rpupd->rp->r_pub : NULL;
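  /* In the string built below, the lu_flags letters "SAMPUNDRV" stand for
   * SELF, ASR, MULTI, PREPARE_ONLY, UNSAFE, NOMMAP, DETACHED, INCLUDES_RS and
   * INCLUDES_VM; the init_flags letters "FCTD" stand for FAIL, CRASH, TIMEOUT
   * and DEFCB.
   */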
#define srv_ep(RPUB) (RPUB ? (RPUB)->endpoint : -1)
#define srv_upd_luflag_c(F) (rpupd->lu_flags & F ? '1' : '0')
#define srv_upd_iflag_c(F) (rpupd->init_flags & F ? '1' : '0')

  sprintf(srv_upd_string, "update (lu_flags(SAMPUNDRV)=%c%c%c%c%c%c%c%c%c, init_flags(FCTD)=%c%c%c%c, state %d (%s), tm %lu, maxtime %lu, endpoint %d, state_data_gid %d, prev_ep %d, next_ep %d)",
      srv_upd_luflag_c(SEF_LU_SELF), srv_upd_luflag_c(SEF_LU_ASR),
      srv_upd_luflag_c(SEF_LU_MULTI), srv_upd_luflag_c(SEF_LU_PREPARE_ONLY),
      srv_upd_luflag_c(SEF_LU_UNSAFE), srv_upd_luflag_c(SEF_LU_NOMMAP),
      srv_upd_luflag_c(SEF_LU_DETACHED), srv_upd_luflag_c(SEF_LU_INCLUDES_RS),
      srv_upd_luflag_c(SEF_LU_INCLUDES_VM), srv_upd_iflag_c(SEF_INIT_FAIL),
      srv_upd_iflag_c(SEF_INIT_CRASH), srv_upd_iflag_c(SEF_INIT_TIMEOUT),
      srv_upd_iflag_c(SEF_INIT_DEFCB), rpupd->prepare_state,
      rpupd->prepare_state_data.eval_addr ? rpupd->prepare_state_data.eval_addr : "", rpupd->prepare_tm,
      rpupd->prepare_maxtime, srv_ep(rpub), rpupd->prepare_state_data_gid,
      srv_ep(prev_rpub), srv_ep(next_rpub));

  return srv_upd_string;
}

/*===========================================================================*
 *				rs_asynsend				     *
 *===========================================================================*/
int rs_asynsend(struct rproc *rp, message *m_ptr, int no_reply)
{
  struct rprocpub *rpub;
  int r;

  rpub = rp->r_pub;

  if(no_reply) {
      r = asynsend3(rpub->endpoint, m_ptr, AMF_NOREPLY);
  }
  else {
      r = asynsend(rpub->endpoint, m_ptr);
  }

  if(rs_verbose)
      printf("RS: %s being asynsent to with message type %d, noreply=%d, result=%d\n",
          srv_to_string(rp), m_ptr->m_type, no_reply, r);

  return r;
}

/*===========================================================================*
 *			      rs_receive_ticks				     *
 *===========================================================================*/
int rs_receive_ticks(endpoint_t src, message *m_ptr,
	int *status_ptr, clock_t ticks)
{
  /* IPC receive with timeout.  Implemented with IPC filters.  The timer
   * management logic comes from the tickdelay(3) implementation.
   */
  ipc_filter_el_t ipc_filter[2];
  clock_t time_left, uptime;
  int r, s, status;

  /* Use IPC filters to receive from the provided source and CLOCK only.
   * We make the hard assumption that RS did not already have IPC filters set.
   */
  memset(ipc_filter, 0, sizeof(ipc_filter));
  ipc_filter[0].flags = IPCF_MATCH_M_SOURCE;
  ipc_filter[0].m_source = CLOCK;
  ipc_filter[1].flags = IPCF_MATCH_M_SOURCE;
  ipc_filter[1].m_source = src;

  if ((s = sys_statectl(SYS_STATE_ADD_IPC_WL_FILTER, ipc_filter,
      sizeof(ipc_filter))) != OK)
      panic("RS: rs_receive_ticks: setting IPC filter failed: %d", s);

  /* Set a new alarm, and get information about the previous alarm. */
  if ((s = sys_setalarm2(ticks, FALSE, &time_left, &uptime)) != OK)
      panic("RS: rs_receive_ticks: setting alarm failed: %d", s);

  /* Receive a message from either the provided source or CLOCK. */
  while ((r = ipc_receive(ANY, m_ptr, &status)) == OK &&
      m_ptr->m_source == CLOCK) {
      /* Ignore early clock notifications. */
      if (m_ptr->m_type == NOTIFY_MESSAGE &&
          m_ptr->m_notify.timestamp >= uptime + ticks)
          break;
  }

  /* Reinstate the previous alarm, if any.  Do this in any case. */
  if (time_left != TMR_NEVER) {
      if (time_left > ticks)
          time_left -= ticks;
      else
          time_left = 1; /* force an alarm */

      (void)sys_setalarm(time_left, FALSE);
  }

  /* Clear the IPC filters. */
  if ((s = sys_statectl(SYS_STATE_CLEAR_IPC_FILTERS, NULL, 0)) != OK)
      panic("RS: rs_receive_ticks: clearing IPC filters failed: %d", s);

  /* If the last received message was from CLOCK, we timed out. */
  if (r == OK && m_ptr->m_source == CLOCK)
      return ENOTREADY;

  if (status_ptr != NULL)
      *status_ptr = status;
  return r;
}

/*===========================================================================*
 *				   reply				     *
 *===========================================================================*/
void reply(who, rp, m_ptr)
endpoint_t who;			/* replyee */
struct rproc *rp;		/* replyee slot (if any) */
message *m_ptr;			/* reply message */
{
  int r;			/* send status */

  /* No need to actually reply to RS. */
  if(who == RS_PROC_NR) {
      return;
  }

  if(rs_verbose && rp)
      printf("RS: %s being replied to with message type %d\n", srv_to_string(rp), m_ptr->m_type);

  r = ipc_sendnb(who, m_ptr);		/* send the message */
  if (r != OK)
      printf("RS: unable to send reply to %d: %d\n", who, r);
}

/*===========================================================================*
 *				late_reply				     *
 *===========================================================================*/
void late_reply(rp, code)
struct rproc *rp;		/* pointer to process slot */
int code;			/* status code */
{
  /* If a caller is waiting for a reply, unblock it. */
  if(rp->r_flags & RS_LATEREPLY) {
      message m;
      m.m_type = code;
      if(rs_verbose)
          printf("RS: %s late reply %d to %d for request %d\n",
              srv_to_string(rp), code, rp->r_caller, rp->r_caller_request);

      reply(rp->r_caller, NULL, &m);
      rp->r_flags &= ~RS_LATEREPLY;
  }
}

/*===========================================================================*
 *			       rs_isokendpt				     *
 *===========================================================================*/
int rs_isokendpt(endpoint_t endpoint, int *proc)
{
  *proc = _ENDPOINT_P(endpoint);
  if(*proc < -NR_TASKS || *proc >= NR_PROCS)
      return EINVAL;

  return OK;
}

/*===========================================================================*
 *			      sched_init_proc				     *
 *===========================================================================*/
int sched_init_proc(struct rproc *rp)
{
  int s;
  int is_usr_proc;

  /* Make sure user processes have no scheduler.  PM deals with them. */
  is_usr_proc = !(rp->r_priv.s_flags & SYS_PROC);
  if(is_usr_proc) assert(rp->r_scheduler == NONE);
  if(!is_usr_proc) assert(rp->r_scheduler != NONE);

  /* Start scheduling for the given process. */
  if ((s = sched_start(rp->r_scheduler, rp->r_pub->endpoint,
      RS_PROC_NR, rp->r_priority, rp->r_quantum, rp->r_cpu,
      &rp->r_scheduler)) != OK) {
      return s;
  }

  return s;
}

/*===========================================================================*
 *			      update_sig_mgrs				     *
 *===========================================================================*/
int update_sig_mgrs(struct rproc *rp, endpoint_t sig_mgr,
	endpoint_t bak_sig_mgr)
{
  int r;
  struct rprocpub *rpub;

  rpub = rp->r_pub;

  if(rs_verbose)
      printf("RS: %s updates signal managers: %d%s / %d\n", srv_to_string(rp),
          sig_mgr == SELF ? rpub->endpoint : sig_mgr,
          sig_mgr == SELF ? "(SELF)" : "",
          bak_sig_mgr == NONE ? -1 : bak_sig_mgr);

  /* Synch privilege structure with the kernel. */
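  /* Fetching the kernel's current copy first ensures that only the signal
   * manager fields change when the structure is written back below.
   */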
  if ((r = sys_getpriv(&rp->r_priv, rpub->endpoint)) != OK) {
      printf("unable to synch privilege structure: %d\n", r);
      return r;
  }

  /* Set signal managers. */
  rp->r_priv.s_sig_mgr = sig_mgr;
  rp->r_priv.s_bak_sig_mgr = bak_sig_mgr;

  /* Update privilege structure. */
  r = sys_privctl(rpub->endpoint, SYS_PRIV_UPDATE_SYS, &rp->r_priv);
  if(r != OK) {
      printf("unable to update privilege structure: %d\n", r);
      return r;
  }

  return OK;
}

/*===========================================================================*
 *				 rs_is_idle				     *
 *===========================================================================*/
int rs_is_idle()
{
  int slot_nr;
  struct rproc *rp;
  for (slot_nr = 0; slot_nr < NR_SYS_PROCS; slot_nr++) {
      rp = &rproc[slot_nr];
      if (!(rp->r_flags & RS_IN_USE)) {
          continue;
      }
      if(!RS_SRV_IS_IDLE(rp)) {
          return 0;
      }
  }
  return 1;
}

/*===========================================================================*
 *			      rs_idle_period				     *
 *===========================================================================*/
void rs_idle_period()
{
  struct rproc *rp;
  struct rprocpub *rpub;
  int r;

  /* Not much to do when RS is not idle.  However, to avoid deadlocks it is
   * absolutely necessary that dead services are actually cleaned up during
   * system shutdown.  Override the idle check in that case.
   */
  if(!shutting_down && !rs_is_idle()) {
      return;
  }

  /* Clean up dead services. */
  for (rp=BEG_RPROC_ADDR; rp<END_RPROC_ADDR; rp++) {
      if((rp->r_flags & (RS_IN_USE|RS_DEAD)) == (RS_IN_USE|RS_DEAD)) {
          cleanup_service(rp);
      }
  }

  if (shutting_down) return;

  /* Create missing replicas when necessary. */
  for (rp=BEG_RPROC_ADDR; rp<END_RPROC_ADDR; rp++) {
      rpub = rp->r_pub;
      if((rp->r_flags & RS_ACTIVE) && (rpub->sys_flags & SF_USE_REPL) && rp->r_next_rp == NULL) {
          if(rpub->endpoint == VM_PROC_NR && (rp->r_old_rp || rp->r_new_rp)) {
              /* Only one replica at a time for VM. */
              continue;
          }
          if ((r = clone_service(rp, RST_SYS_PROC, 0)) != OK) {
              printf("RS: warning: unable to clone %s (error %d)\n",
                  srv_to_string(rp), r);
          }
      }
  }
}

/*===========================================================================*
 *			   print_services_status			     *
 *===========================================================================*/
void print_services_status()
{
  int slot_nr;
  struct rproc *rp;
  int num_services = 0;
  int num_service_instances = 0;
  int is_verbose = 1;

  PRINT_SEP();
  printf("Printing information about all the system service instances:\n");
  PRINT_SEP();
  for (slot_nr = 0; slot_nr < NR_SYS_PROCS; slot_nr++) {
      rp = &rproc[slot_nr];
      if (!(rp->r_flags & RS_IN_USE)) {
          continue;
      }
      if (rp->r_flags & RS_ACTIVE) {
          num_services++;
      }
      num_service_instances++;
      printf("%s\n", srv_to_string_gen(rp, is_verbose));
  }
  PRINT_SEP();
  printf("Found %d service instances, of which %d are active services\n",
      num_service_instances, num_services);
  PRINT_SEP();
}

/*===========================================================================*
 *			    print_update_status				     *
 *===========================================================================*/
void print_update_status()
{
  struct rprocupd *prev_rpupd, *rpupd;
  int is_updating = RUPDATE_IS_UPDATING();
  int i;

#define rupdate_flag_c(F) (rupdate.flags & F ? '1' : '0')

  if(!is_updating && !RUPDATE_IS_UPD_SCHEDULED()) {
      PRINT_SEP();
      printf("No update is in progress or scheduled\n");
      PRINT_SEP();
      return;
  }

  PRINT_SEP();
  i = 1;
  printf("A %s-component update is %s, flags(UIRV)=%c%c%c%c:\n", RUPDATE_IS_UPD_MULTI() ? "multi" : "single",
      is_updating ? "in progress" : "scheduled",
      rupdate_flag_c(RS_UPDATING), rupdate_flag_c(RS_INITIALIZING),
      rupdate.rs_rpupd ? '1' : '0', rupdate.vm_rpupd ? '1' : '0');
  PRINT_SEP();
  RUPDATE_ITER(rupdate.first_rpupd, prev_rpupd, rpupd,
      printf("%d. %s %s %s\n", i++, srv_to_string(rpupd->rp),
          is_updating ? "updating with" : "scheduled for",
          srv_upd_to_string(rpupd));
  );
  PRINT_SEP();

#undef rupdate_flag_c
}