/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <stdio.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <door.h>
#include <errno.h>
#include <fcntl.h>
#include <strings.h>
#include <unistd.h>
#include <synch.h>
#include <syslog.h>
#include <pthread.h>
#include <thread.h>
#include <signal.h>
#include <limits.h>
#include <locale.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/wait.h>
#include <sys/processor.h>
#include <sys/pset.h>
#include <ctype.h>
#include <poll.h>
#include <sys/wait.h>
#include <sys/pm.h>
#include <sys/iso/signal_iso.h>
#include <sys/procset.h>

#include "fpsapi.h"
#include "fpsd.h"
#include "messages.h"

/* Local Functions */

static int
check_invoke_prog(int devid, time_t *last,
    unsigned tstswap, int frequency, int group_no, int fpu_index);

static int identify_fpu_to_run_test(int *freq, int *iteration, int
*fpu_index);

void *test_fpu_thr(void *arg);

/*
 * Error-path macros used inside check_invoke_prog().  Both rely on the
 * locals "error" and "testpath" being in scope at the expansion site.
 * CPU_TST_FORK_FAIL logs the fork failure and returns -1 from the
 * enclosing function; CPU_TST_EXEC_FAIL runs in the forked child after
 * a failed execl() and exits via fpsd_message(FPSD_EXIT_ERROR, ...).
 */
#define	CPU_TST_FORK_FAIL {\
	error = errno; \
	fpsd_message(FPSD_NO_EXIT, FPS_ERROR, FORK_FAIL_MSG, \
	    testpath, strerror(error)); \
	return (-1); \
}

#define	CPU_TST_EXEC_FAIL { \
	error = errno; \
	fpsd_message(FPSD_EXIT_ERROR,\
	    FPS_ERROR, TST_EXEC_FAIL, testpath, strerror(error)); \
}

/* Delay (secs) before bootup testing on small systems; see test_fpu_thr(). */
static int boot_tst_delay = FPS_BOOT_TST_DELAY;

/*
 * Increments the failure count for the cpu at position "index" in the
 * m_cpus table.  Silently ignores an out-of-range index.
 */
static void
record_failure(int devid, int index) {
	if ((index >= 0) &&
	    (index < fpsd.d_conf->m_cpuids_size)) {
		fpsd.d_conf->m_cpus[index].num_failures++;
		fpsd_message(FPSD_NO_EXIT, FPS_DEBUG,
		    RECORD_FAILURE_MSG, devid, index);
	}
}

/* Returns 1 if testing is disabled for the cpu, else 0 */

static int
check_if_disabled(int fpu_index) {
	int is_disabled;

	is_disabled = fpsd.d_conf->m_cpus[fpu_index].disable_test;
	if (is_disabled) {
		return (1);
	} else {
		return (0);
	}
}

/*
 * Forks and executes "fptest" and waits for an amount
 * of time equal to the time to schedule next "fptest".
 * Times out if the test does not complete and unbinds
 * and terminates the test.
 * Return : 0 = Nothing Invoked. 1 = invoked OK. -1 = Failure.
 */
static int
check_invoke_prog(int devid,		/* cpu-id */
    time_t *last,			/* Last time it was invoked */
    unsigned tstswap,			/* Expected swap space required for test */
    int frequency,			/* Frequency of the processor under test */
    int group_no,			/* Group no. ==> matrix size to be used */
    int fpu_index)			/* Index of the cpu in the m_cpus table */
{
	int error;
	hrtime_t start_hrtime = 0, end_hrtime = 0, hrmsecs = 0;
	hrtime_t hrsecs = 0;
	pid_t pid = -1;
	int exit_status = 0;
	char cpuid_c[64];
	char frequency_c[10];
	char group_c[10];
	int ret = 0;
	int status = 0;
	char *testpath;
	char sig_str[32];
	int elapsed_time;
	int status_available;
	int max_timeout;
	int pb_ret;

	testpath = fpsd.d_conf->m_cpus[fpu_index].fptest_path;
	if (check_if_disabled(fpu_index)) {
		return (0);
	}

	/* Compare all in seconds. */

	*last = time(NULL);

	/* Render the child's command-line arguments as strings. */
	(void) snprintf(cpuid_c, sizeof (cpuid_c), "%d", devid);
	(void) snprintf(frequency_c, sizeof (frequency_c), "%d", frequency);
	(void) snprintf(group_c, sizeof (group_c), "%d", group_no);

	/* Check if enough swap space is there; Return 0 if not. */

	if (get_free_swap() < (uint64_t)(tstswap+FPS_SWAP_RESERVE)) {
		fpsd_message(FPSD_NO_EXIT, FPS_WARNING, SWAP_WARN, testpath);
		return (ret);
	}

	fpsd_message(FPSD_NO_EXIT, FPS_INFO, START_TEST_MSG,
	    testpath, frequency_c, group_c, cpuid_c);

	start_hrtime = gethrtime();

	pid = fork1();	/* fork1() duplicates only the calling thread */
	if (pid == 0) {
		(void) execl(testpath,		/* Path */
		    FPS_FPUTST_NAME,		/* Arg 0 */
		    "-f",
		    frequency_c,		/* Frequency */
		    "-p",
		    group_c,			/* Group no. */
		    "-d",
		    cpuid_c,			/* CPU ID */
		    (char *)NULL);

		CPU_TST_EXEC_FAIL	/* Should never reach here */
	}

	if (pid == -1)
		CPU_TST_FORK_FAIL

	/*
	 * Synchronously wait here till the child dies, polling every
	 * 50 ms with WNOHANG, up to d_interval seconds in total.
	 */

	elapsed_time = 0;
	status_available = 0;
	max_timeout = fpsd.d_interval * 1000;	/* milliseconds */
	while (elapsed_time < max_timeout) {
		if (pid == waitpid((pid_t)pid, &status, WNOHANG)) {
			status_available = 1;
			break;
		} else {
			elapsed_time += 50;
			(void) poll(NULL, 0, 50); /* wait 50 milli sec. */
		}
	}

	if (!status_available) {
		exit_status = FPU_TIMED_OUT;
	} else {
		exit_status = WEXITSTATUS(status);
		if (exit_status == 0xFF) {
			/* As WEXITSTATUS returns 0xFF */
			exit_status = FPU_UNSUPPORT;
		}
	}

	/*
	 * Map the child's exit status to an action.  ret stays 1 where the
	 * run counts as done, -1 where the caller should retry.
	 */
	if (exit_status == FPU_UNSUPPORT) {
		/* Reprobe */
		fpsd.d_conf->m_reprobe = 1;
		ret = 1;
	} else if (exit_status == FPU_OK) {
		/* Increment iteration */
		fpsd.d_iteration++;
		ret = 1;
	} else if ((exit_status == FPU_FOROFFLINE) ||
	    (exit_status == FPU_BIND_FAIL)) {
		/* Force reprobe */
		fpsd.d_conf->m_reprobe = 1;
		ret = 1;
	} else if (exit_status == FPU_INVALID_ARG) {
		/* This should not happen; so force exit */
		fpsd_message(FPSD_EXIT_TEST_USAGE, FPS_ERROR,
		    FPU_INVALID_ARG_MSG);
	} else if ((exit_status == FPU_SIG_SEGV) ||
	    (exit_status == FPU_SIG_BUS)) {
		fpsd_message(FPSD_NO_EXIT, FPS_ERROR, FPU_SIG_RCVD,
		    devid);
		record_failure(devid, fpu_index);
		ret = -1;	/* Retry */
	} else if (exit_status == FPU_SIG_FPE) {
		fpsd_message(FPSD_NO_EXIT, FPS_ERROR, FPU_FPE_MSG,
		    devid);
		record_failure(devid, fpu_index);
		ret = -1;
	} else if (exit_status == FPU_SIG_ILL) {
		fpsd_message(FPSD_NO_EXIT, FPS_ERROR, FPU_SIG_ILL_MSG,
		    devid);
		record_failure(devid, fpu_index);
		ret = -1;
	} else if (exit_status == FPU_SYSCALL_FAIL) {
		fpsd_message(FPSD_NO_EXIT, FPS_ERROR, FPU_SYSCALL_FAIL_MSG,
		    devid);
		record_failure(devid, fpu_index);
		fpsd.d_iteration++;	/* Iteration skipped */
		ret = 1;	/* Record failure and move on */
	} else if (exit_status == FPU_EREPORT_INCOM) {
		fpsd_message(FPSD_NO_EXIT, FPS_ERROR, FPU_EREPORT_INCOM_MSG,
		    devid);
		fpsd.d_conf->m_reprobe = 1;
		ret = 1;
	} else if (exit_status == FPU_SYSCALL_TRYAGAIN) {
		fpsd_message(FPSD_NO_EXIT, FPS_ERROR, FPU_SYSCALL_TRYAGAIN_MSG);
		ret = -1;	/* Retry as it could be some resource issue */
	} else if (exit_status == FPU_EREPORT_FAIL) {
		fpsd_message(FPSD_NO_EXIT, FPS_ERROR, FPU_EREPORT_FAIL_MSG,
		    devid);
		ret = -1;
	} else if (exit_status == FPU_TIMED_OUT) {
		/*
		 * The test overran its budget: unbind it from the
		 * processor, then SIGINT it repeatedly until waitpid()
		 * reaps it.
		 */
		pb_ret = processor_bind(P_PID, pid, PBIND_NONE, NULL);
		if (pb_ret == -1) {
			fpsd_message(FPSD_NO_EXIT, FPS_INFO,
			    UNBIND_FAIL_MSG,
			    strerror(errno));
		}
		(void) kill(pid, SIGINT);
		while (pid != waitpid((pid_t)pid, &status, WUNTRACED)) {
			(void) poll(NULL, 0, 10);
			(void) kill(pid, SIGINT);
		}
		fpsd_message(FPSD_NO_EXIT, FPS_ERROR, FPU_TIMED_OUT_MSG, devid);
		record_failure(devid, fpu_index);
		ret = -1;
	}

	/*
	 * The following is the case if the test ended due to a
	 * signal and did not have a handler for the signal.
	 */
	if (WIFSIGNALED(status)) {
		(void) sig2str(WTERMSIG(status), sig_str);
		fpsd_message(FPSD_NO_EXIT, FPS_INFO,
		    TST_SIGNALED_MSG, devid,
		    frequency, sig_str);
		record_failure(devid, fpu_index);
		ret = -1;	/* Retry */
	}

	/* Log elapsed wall-clock time (h:m:s.ms) for this test run. */
	end_hrtime = gethrtime();
	hrmsecs = ((end_hrtime - start_hrtime)/
	    ((hrtime_t)1000*1000));
	hrsecs = hrmsecs / 1000;
	fpsd_message(FPSD_NO_EXIT, FPS_INFO, END_TST_MSG, (int)pid,
	    (int)(hrsecs/(60*60)),
	    (int)((hrsecs%3600)/60),
	    (int)(hrsecs%60),
	    (int)(hrmsecs%1000),
	    cpuid_c);

	fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, EXIT_STAT_MSG, exit_status);

	return (ret);
}

/*
 * The test scheduling thread.
 */

void *
test_fpu_thr(/* ARGSUSED */ void *arg)
{
	time_t cur = 0,	/* current time in secs */
	    last = 0;	/* Last time this level testing done in secs */
	int ret;

	int intvl = 0;	/* interval */
	unsigned tswap = 0;
	int poll_intvl;
	long num_cpus;
	int idle = 0, remain = 0, max_remain = 0;
	time_t last_wakeup = 0, wakeup_elapse;
	int fpuid;
	int frequency;
	int group_no;

	int force_skip_test_if_pm_idle = 1;
	int fpu_index;
	int max_idle_time_4_tst_run;
	int j;

	/*
	 * If enabled, do not run test on idle system, even if test intvl
	 * explicitly specified.
	 */

	/*
	 * Minimum time to wait before scheduling tests
	 * when the system just wakes up from sleep.
	 */
#define	MINSLEEP	8

	num_cpus = fpsd.d_conf->m_num_on_fpuids;

	intvl = poll_intvl = fpsd.d_interval;

	tswap = FPS_LOWTST_SWAP;

	cur = time(NULL);

	/*
	 * Initialize last time test done based on earlier bootup testing.
	 * This decides when the first time scheduling of the test is
	 * to be done.
	 */

	/*
	 * In systems with less than 3 processors, the initial testing
	 * has been found to affect the system bootup time.
	 * Wait for boot_tst_delay secs for those systems before starting
	 * any testing.
	 */

	if (num_cpus < 3)
		fps_wait_secs(boot_tst_delay);

	/* Soft bind once before starting tests. */
	if (processor_bind(P_PID, P_MYID, PBIND_SOFT, NULL) != 0) {
		fpsd_message(FPSD_EXIT_ERROR, FPS_ERROR, SYSTEM_CALL_FAIL,
		    "processor_bind", strerror(errno));
	}

	if (pset_bind(PS_SOFT, P_PID, P_MYID, NULL) != 0) {
		fpsd_message(FPSD_EXIT_ERROR, FPS_ERROR, SYSTEM_CALL_FAIL,
		    "pset_bind", strerror(errno));
	}

#define	MAX_IDLE_TIME_FOR_TSTRUN	10

	/*
	 * Cap the "system is idle" threshold: for long intervals use the
	 * fixed 10 sec cap, otherwise half the interval plus the worst-case
	 * test run time.
	 */
	if (intvl/2 > MAX_IDLE_TIME_FOR_TSTRUN) {
		max_idle_time_4_tst_run =
		    MAX_IDLE_TIME_FOR_TSTRUN;
	} else {
		max_idle_time_4_tst_run =
		    (intvl/2) +
		    MAX_TEST_RUN_TIME;
	}

	cur = time(NULL);
	last = 0;	/* Force the invocation by setting last to zero. */


	for (;;) {
		time_t elapse;

		cur = time(NULL);
		elapse = cur - last;

		/*
		 * Sleep for intvl secs amount of time.
		 */

		if (elapse >= (time_t)intvl)
			poll_intvl = 0;
		else	/* sleep only for the remainder of the interval */
			poll_intvl = (int)((time_t)intvl-elapse);

		/*
		 * Until poll_intvl becomes zero, sleep.
		 * If poll gets interrupted for any reason, then also works.
		 */

		if (poll_intvl > 0) {
			(void) poll(NULL, 0, poll_intvl*1000);
			continue;
		}

/*
 * Picks the next fpu, runs the test on it, and retries up to MAX_RETRIES
 * times on failure.  Expands "continue" statements, so it may only be
 * used directly inside the scheduling loop below.
 */
#define	INVOKE_PROG { \
	fpuid = identify_fpu_to_run_test(&frequency, &group_no, &fpu_index);\
	if (intvl != fpsd.d_interval) { \
		/* \
		 * Interval has changed due to change in \
		 * online processors/ config properties. \
		 */ \
		intvl = fpsd.d_interval; \
		fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, \
		    INTVL_CHANGED_MSG, intvl); \
	} \
	if (fpuid == -1) {\
		/* Testing could not be done on any cpu */\
		(void) poll(NULL, 0, 20); /* Wait for some time */\
		continue;\
	}\
	ret = check_invoke_prog(fpuid, &last, tswap, frequency, \
	    group_no, fpu_index); \
	if (ret == -1) { \
		for (j = 0; (j < MAX_RETRIES) && (ret != 1); j++) { \
			(void) poll(NULL, 0, RETRY_INTVL); \
			fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, \
			    RESCHEDULE_MSG, fpuid);\
			ret = check_invoke_prog(fpuid, &last, tswap, \
			    frequency, group_no, fpu_index); \
		} \
		if (ret == -1) { \
			/*\
			 * Tried MAX_RETRIES times. Still seeing failures\
			 * on this fpu. Skip this iteration and move on.\
			 */\
			fpsd.d_iteration++; \
		} \
	} \
}

		/*
		 * If power management is disabled (or not supported) on the
		 * system, just go ahead, invoke the program.
		 */
		update_pm_state();	/* Update current PM state. */
		if (sys_pm_state != PM_SYSTEM_PM_ENABLED) {
			/* autopm disabled. Just go ahead invoke program. */
			INVOKE_PROG
			continue;
		}

		/*
		 * Power management is enabled. This system may be CPU PM
		 * enabled system or just disk(and other) PM enabled.
		 * If CPU PM not supported, just invoke the program.
		 */
		if (!is_estar_system) {
			INVOKE_PROG
			continue;
		}

		/* This system is CPU PM currently supported & enabled. */

		/*
		 * By default, tests are not invoked on E* compliant system
		 * while it is idle.  However if force_skip_test_if_pm_idle
		 * is set to 0, tests will be invoked. This is kept for
		 * debugging purposes for now.
		 * Should be removed if no use cases.
		 */

		if (!force_skip_test_if_pm_idle) {
			INVOKE_PROG
			continue;
		}

		/*
		 * If the system is in sleep mode, wait until it comes
		 * to full power mode.
		 */

		/* If CPUs are not in full power mode, this will return -1 */
		ret = get_idle_rem_stats(&idle, &remain, &max_remain);

		/*
		 * Wait until CPU comes to full power mode.
		 * Call wait for state change function -- the return from the
		 * function does not guarantee that the system is in full power
		 * mode. So get the current status later as well.
		 */
		if (ret == -1) {
			while (ret == -1) {
				/* Avoid busy loop in any case */
				(void) poll(NULL, 0, 1000);
				/* Wait until CPU comes to full pwr */
				wait_for_pm_state_change();
				ret = get_idle_rem_stats(&idle, &remain,
				    &max_remain);
			}

			/* Remember the last time that we woke up. */
			last_wakeup = time(NULL);
		}

		/*
		 * To meet E* requirements, the system should go to
		 * deep sleep mode in 30 mins on default configs.
		 * The CPU power management does this by 14.25min+14.25min
		 * so total 28.5mins. (in sleep mode followed by deep sleep).
		 * Running the test as the system just becomes active,
		 * may reset the idle counter and may delay the transition.
		 * However since we have 1.5 mins cushion to meet E*
		 * requirements, we are just making use of it.
		 *
		 * If system is idle for more than 10 seconds, wait
		 * until the system idle time is less than 10 seconds.
		 * Poll in 2 sec interval, so we will catch it as soon
		 * as the system idle time goes low (as it just becomes busy).
		 * Basically don't run test on an idle system.
		 * If the system is continuously busy, then this will
		 * result in continuously scheduling the test.
		 *
		 * Running test on a system which is just 10 seconds idle,
		 * may reset the idle counter.
		 * This will postpone the idle transition to it's lowest power
		 * by worst case of 10 secs + worst case run time for fptest
		 * that is approximately 1 sec = 11 sec.
		 * This is below the 1.5mins cushion CPU PM now has to make
		 * idle transition.
		 *
		 * So if d_interval/2 >= 10 follow the above logic. Else, reduce
		 * max_idle_time_4_tstrun = d_interval/2 + max_time_taken_by_test
		 * (which is <= 1s). We want to be conservative in scheduling
		 * test rather than utilize the cushion to maximum possible
		 * extent.
		 * Note: The E* desktop systems have at most 2 processors, but
		 * this will work even for more processors in which case the
		 * interval will be less or if the interval is configured thro'
		 * SMF.
		 * As long as at least any one processor is in full power mode,
		 * all processors have to be in same power level.
		 */

		/* Invoke program if system is "busy" */

		if (idle <= max_idle_time_4_tst_run) {
			/*
			 * If the system is just waking up from sleep, don't rush into
			 * testing immediately to avoid hiccups in performance.
			 *
			 */
			wakeup_elapse = time(NULL) - last_wakeup;
			if (wakeup_elapse < MINSLEEP) {
				fps_wait_secs((int)(MINSLEEP-wakeup_elapse));
			}
			INVOKE_PROG
			continue;
		}

		/* The system is "idle". Wait until it becomes "busy" */
		while (idle > max_idle_time_4_tst_run) {

			/*
			 * Once in max_idle_time_4_tst_run/2 secs, we are issuing
			 * ioctl call to catch the system as soon as it becomes
			 * "busy". Polling is not an efficient way to do this,
			 * but this is the only way we got right now.
			 */
			fps_wait_secs(max_idle_time_4_tst_run / 2);
			ret = get_idle_rem_stats(&idle, &remain, &max_remain);
			if (ret == -1) break; /* In case now in sleep mode */
		}
		continue;

	} /* End infinite for loop */

#pragma error_messages(off, E_STATEMENT_NOT_REACHED)
	/* NOTREACHED */
	return (NULL);
}

/*
 * get_num_onln_cpus(): returns the number of processors that are in
 * "on-line" state only. This number will be less than the number
 * returned by sysconf(_SC_NPROCESSORS_ONLN) if there are some
 * processors in "no-intr" state.
 */

static int
get_num_onln_cpus()
{
	int i;
	int num_onln = 0;
	int total_onln = sysconf(_SC_NPROCESSORS_ONLN);

	/*
	 * NOTE(review): if m_max_cpuid is an inclusive maximum cpuid,
	 * the cpuid equal to m_max_cpuid is never checked by this loop
	 * (i < m_max_cpuid) -- confirm against the config code that
	 * sets m_max_cpuid.
	 */
	for (i = 0; i < fpsd.d_conf->m_max_cpuid; i++) {
		if (p_online(i, P_STATUS) == P_ONLINE) {
			num_onln++;
		}
		if (num_onln == total_onln) {
			/* Break after all onln cpuids found */
			break;
		}
	}
	return (num_onln);
}

/*
 * Identifies the fpu on which test will be scheduled next.
 *
 * Returns the cpuid of the chosen fpu, or -1 if no testable fpu exists
 * (every cpu disabled).  On success the out parameters are filled in:
 *   *freq      - frequency of the chosen cpu
 *   *iteration - iteration number (== matrix-size selector) to run next
 *   *fpu_index - index of the chosen cpu in the m_cpus table
 *
 * Side effects: may trigger a config reprobe (fpsd_read_config()) when
 * the online-cpu count changed or a SIGHUP was received, and advances
 * the shared scheduling state fpsd.d_fpuid_index / previous_iteration.
 */

static int
identify_fpu_to_run_test(int *freq, int *iteration, int *fpu_index)
{
	int fpuid = -1;
	int ascend;
	int tmp_iter;
	fps_cpu_t fps_cpu;
	int i;
	int num_onln;
	/* Timestamp at which SIGHUP ts was checked last */
	static hrtime_t ts_hup_chkd = 0;
	hrtime_t tmp_ts;

	*iteration = *freq = 0;
	while (fpuid == -1) {
		/* Check if the number of online processors has changed */
		num_onln = get_num_onln_cpus();
		if (num_onln != fpsd.d_conf->m_num_on_fpuids) {
			fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, REPROBE_MSG);
			fpsd.d_conf->m_reprobe = 1;
		}

		/*
		 * Request a reprobe if a SIGHUP arrived since the last check.
		 * NOTE(review): d_ts_hup is read twice (snapshot into tmp_ts,
		 * then compared directly); if the signal handler updates it
		 * between the two reads one update could be missed -- the
		 * comparison presumably should use tmp_ts. Confirm intent.
		 */
		tmp_ts = fpsd.d_ts_hup;
		if (fpsd.d_ts_hup > ts_hup_chkd) {
			fpsd.d_conf->m_reprobe = 1;
		}
		ts_hup_chkd = tmp_ts;

		if (1 == fpsd.d_conf->m_reprobe) {
			fpsd_read_config();
		}
		fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, IDENTIFY_FPU_MSG,
		    fpsd.d_fpuid_index, fpsd.d_iteration,
		    fpsd.d_conf->total_iter, fpsd.d_conf->m_cpuids_size);
		if (fpsd.d_iteration == fpsd.d_conf->total_iter) {
			/* One pass completed */
			fpsd.d_iteration = 0;

			/*
			 * Reinit iterations: ascending fpus restart below
			 * their first iteration, descending fpus above
			 * their last.
			 */
			for (i = 0; i < fpsd.d_conf->m_cpuids_size; i++) {
				if (fpsd.d_conf->m_cpus[i].disable_test)
					continue;
				ascend = fpsd.d_conf->m_cpus[i].asc;
				if (ascend) {
					fpsd.d_conf->m_cpus[i].previous_iteration = 0;
				} else {
					fpsd.d_conf->m_cpus[i].previous_iteration =
					    fpsd.d_conf->m_cpus[i].total_iterations + 1;
				}
			}
		}
		if (fpsd.d_iteration == 0) { /* Beginning of one pass */
			/* Start from the first cpu that is not disabled */
			fpsd.d_fpuid_index = 0;
			while (fpsd.d_fpuid_index <
			    fpsd.d_conf->m_cpuids_size) {
				if (fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].\
disable_test) {
					fpsd.d_fpuid_index++;
				} else {
					break;
				}
			}
			if (fpsd.d_fpuid_index == fpsd.d_conf->m_cpuids_size) {
				/* Every cpu is disabled; nothing to test */
				return (-1);
			}
		} else {
			if (fpsd.d_fpuid_index ==
			    (fpsd.d_conf->m_cpuids_size-1)) {
				/* One iteration done for all fpus */
				fpsd.d_fpuid_index = 0;
			} else {
				fpsd.d_fpuid_index++;
			}
		}
		fps_cpu = fpsd.d_conf->m_cpus[fpsd.d_fpuid_index];
		fpuid = fps_cpu.cpuid;
		if (fps_cpu.disable_test) {
			fpuid = -1;
			continue;
		}
		*freq = fps_cpu.frequency;

		/* Find the iteration no. */
		tmp_iter = fps_cpu.previous_iteration;
		ascend = fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].asc;
		if (ascend) {
			if (tmp_iter == fps_cpu.total_iterations) {
				/*
				 * 1 pass completed for this fpu;
				 * skip this fpu and goto the next fpu
				 */
				fpuid = -1;
				continue;
			} else {
				fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].\
previous_iteration++;
			}
		} else {
			/* This FPU is tested in descending order of */
			/* iteration no. ==> matrix size */
			if (tmp_iter == 1) {
				/*
				 * 1 pass completed for this fpu;
				 * skip this fpu and goto the next fpu
				 */
				fpuid = -1;
				continue;
			} else {
				fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].\
previous_iteration--;
			}
		}
		*iteration =
		    fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].previous_iteration;
		*fpu_index = fpsd.d_fpuid_index;
		fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, IDENTIFY_FPU_RTN_MSG,
		    fpuid, *iteration, *freq,
		    fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].\
previous_iteration,
		    fps_cpu.total_iterations);
	}
	return (fpuid);
}