/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <stdio.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <door.h>
#include <errno.h>
#include <fcntl.h>
#include <strings.h>
#include <unistd.h>
#include <synch.h>
#include <syslog.h>
#include <pthread.h>
#include <thread.h>
#include <signal.h>
#include <limits.h>
#include <locale.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/wait.h>
#include <sys/processor.h>
#include <sys/pset.h>
#include <ctype.h>
#include <poll.h>
#include <sys/wait.h>	/* NOTE(review): duplicate of the include above; harmless */
#include <sys/pm.h>
#include <sys/iso/signal_iso.h>
#include <sys/procset.h>

#include "fpsapi.h"
#include "fpsd.h"
#include "messages.h"

/* Local Functions */

static int
check_invoke_prog(int devid, time_t *last,
    unsigned tstswap, int frequency, int group_no, int fpu_index);

static int identify_fpu_to_run_test(int *freq, int *iteration,
    int *fpu_index);

void *test_fpu_thr(void *arg);

/*
 * Log a fork(2) failure for the current test binary and bail out of the
 * enclosing function with -1 (failure).  Expects "error", "testpath" in
 * scope; expands to a statement block, not an expression.
 */
#define	CPU_TST_FORK_FAIL {\
	error = errno; \
	fpsd_message(FPSD_NO_EXIT, FPS_WARNING, FORK_FAIL_MSG, \
	    testpath, strerror(error)); \
	return (-1); \
	}

/*
 * Log an execl(2) failure in the forked child and terminate the daemon
 * child process (FPSD_EXIT_ERROR makes fpsd_message() exit).  Only
 * reached when the exec of the test binary itself fails.
 */
#define	CPU_TST_EXEC_FAIL { \
	error = errno; \
	fpsd_message(FPSD_EXIT_ERROR,\
	    FPS_WARNING, TST_EXEC_FAIL, testpath, strerror(error)); \
	}

/* Seconds to hold off initial testing after boot (see test_fpu_thr()) */
static int boot_tst_delay = FPS_BOOT_TST_DELAY;

/*
 * Increments the failure count for the cpu identified by "index" into
 * the m_cpus[] config array; "devid" is only used for the debug message.
 * Out-of-range indices are silently ignored.
 */
static void
record_failure(int devid, int index) {
	if ((index >= 0) &&
	    (index < fpsd.d_conf->m_cpuids_size)) {
		fpsd.d_conf->m_cpus[index].num_failures++;
		fpsd_message(FPSD_NO_EXIT, FPS_DEBUG,
		    RECORD_FAILURE_MSG, devid, index);
	}
}

/* Returns 1 if testing is disabled for the cpu, else 0 */

static int
check_if_disabled(int fpu_index) {
	int is_disabled;

	is_disabled = fpsd.d_conf->m_cpus[fpu_index].disable_test;
	if (is_disabled) {
		return (1);
	} else {
		return (0);
	}
}

/*
 * Forks and executes "fptest" and waits for an amount
 * of time equal to the time to schedule next "fptest".
 * Times out if the test does not complete and unbinds
 * and terminates the test.
 * Return : 0 = Nothing Invoked. 1 = invoked OK. -1 = Failure.
 */
static int
check_invoke_prog(int devid,	/* cpu-id */
	time_t *last,		/* Last time it was invoked */
	unsigned tstswap,	/* Expected swap space required for test */
	int frequency,		/* Frequency of the processor under test */
	int group_no,		/* Group no. ==> matrix size to be used */
	int fpu_index)		/* Index into m_cpus[] for this cpu */
{
	int error;
	hrtime_t start_hrtime = 0, end_hrtime = 0, hrmsecs = 0;
	hrtime_t hrsecs = 0;
	pid_t pid = -1;
	int exit_status = 0;
	char cpuid_c[64];
	char frequency_c[10];
	char group_c[10];
	int ret = 0;
	int status = 0;
	char *testpath;
	char sig_str[32];
	int elapsed_time;
	int status_available;
	int max_timeout;
	int pb_ret;

	testpath = fpsd.d_conf->m_cpus[fpu_index].fptest_path;
	if (check_if_disabled(fpu_index)) {
		return (0);
	}

	/* Compare all in seconds. */

	*last = time(NULL);

	/* Stringify the numeric args for execl() below */
	(void) snprintf(cpuid_c, sizeof (cpuid_c), "%d", devid);
	(void) snprintf(frequency_c, sizeof (frequency_c), "%d", frequency);
	(void) snprintf(group_c, sizeof (group_c), "%d", group_no);

	/* Check if enough swap space is there; Return 0 if not. */

	if (get_free_swap() < (uint64_t)(tstswap+FPS_SWAP_RESERVE)) {
		fpsd_message(FPSD_NO_EXIT, FPS_INFO, SWAP_WARN, testpath);
		return (ret);
	}

	fpsd_message(FPSD_NO_EXIT, FPS_INFO, START_TEST_MSG,
	    testpath, frequency_c, group_c, cpuid_c);

	start_hrtime = gethrtime();

	pid = fork1();	/* fork1() duplicates only the calling thread */
	if (pid == 0) {
		(void) execl(testpath,	/* Path */
		    FPS_FPUTST_NAME,	/* Arg 0 */
		    "-f",
		    frequency_c,	/* Frequency */
		    "-p",
		    group_c,		/* Group no. */
		    "-d",
		    cpuid_c,		/* CPU ID */
		    (char *)NULL);

		CPU_TST_EXEC_FAIL	/* Should never reach here */
	}

	if (pid == -1)
		CPU_TST_FORK_FAIL

	/*
	 * Synchronously wait here till the child exits.  Reap with
	 * WNOHANG and poll in 50ms steps so we can give up after one
	 * scheduling interval (d_interval seconds) instead of blocking
	 * forever on a hung test.
	 */

	elapsed_time = 0;
	status_available = 0;
	max_timeout = fpsd.d_interval * 1000;	/* interval in milliseconds */
	while (elapsed_time < max_timeout) {
		if (pid == waitpid((pid_t)pid, &status, WNOHANG)) {
			status_available = 1;
			break;
		} else {
			elapsed_time += 50;
			(void) poll(NULL, 0, 50); /* wait 50 milli sec. */
		}
	}

	if (!status_available) {
		/* Child never exited within the interval; handled below */
		exit_status = FPU_TIMED_OUT;
	} else {
		exit_status = WEXITSTATUS(status);
		if (exit_status == 0xFF) {
			/*
			 * As WEXITSTATUS returns 0xFF
			 * (FPU_UNSUPPORT presumably exits with -1/0xFF;
			 * map it back to the symbolic value).
			 */
			exit_status = FPU_UNSUPPORT;
		}
	}

	/*
	 * Translate the child's exit status into the scheduler's
	 * ret code: 1 = move on, -1 = retry this fpu.
	 */
	if (exit_status == FPU_UNSUPPORT) {
		/* Reprobe */
		fpsd.d_conf->m_reprobe = 1;
		ret = 1;
	} else if (exit_status == FPU_OK) {
		/* Increment iteration */
		fpsd.d_iteration++;
		ret = 1;
	} else if ((exit_status == FPU_FOROFFLINE) ||
	    (exit_status == FPU_BIND_FAIL)) {
		/* Force reprobe */
		fpsd.d_conf->m_reprobe = 1;
		ret = 1;
	} else if (exit_status == FPU_INVALID_ARG) {
		/* This should not happen; so force exit */
		fpsd_message(FPSD_EXIT_TEST_USAGE, FPS_WARNING,
		    FPU_INVALID_ARG_MSG);
	} else if ((exit_status == FPU_SIG_SEGV) ||
	    (exit_status == FPU_SIG_BUS)) {
		fpsd_message(FPSD_NO_EXIT, FPS_INFO, FPU_SIG_RCVD,
		    devid);
		record_failure(devid, fpu_index);
		ret = -1; /* Retry */
	} else if (exit_status == FPU_SIG_FPE) {
		fpsd_message(FPSD_NO_EXIT, FPS_INFO, FPU_FPE_MSG,
		    devid);
		record_failure(devid, fpu_index);
		ret = -1;
	} else if (exit_status == FPU_SIG_ILL) {
		fpsd_message(FPSD_NO_EXIT, FPS_INFO, FPU_SIG_ILL_MSG,
		    devid);
		record_failure(devid, fpu_index);
		ret = -1;
	} else if (exit_status == FPU_SYSCALL_FAIL) {
		fpsd_message(FPSD_NO_EXIT, FPS_INFO, FPU_SYSCALL_FAIL_MSG,
		    devid);
		record_failure(devid, fpu_index);
		fpsd.d_iteration++; /* Iteration skipped */
		ret = 1; /* Record failure and move on */
	} else if (exit_status == FPU_EREPORT_INCOM) {
		fpsd_message(FPSD_NO_EXIT, FPS_INFO, FPU_EREPORT_INCOM_MSG,
		    devid);
		fpsd.d_conf->m_reprobe = 1;
		ret = 1;
	} else if (exit_status == FPU_SYSCALL_TRYAGAIN) {
		fpsd_message(FPSD_NO_EXIT, FPS_INFO, FPU_SYSCALL_TRYAGAIN_MSG);
		ret = -1; /* Retry as it could be some resource issue */
	} else if (exit_status == FPU_EREPORT_FAIL) {
		fpsd_message(FPSD_NO_EXIT, FPS_INFO, FPU_EREPORT_FAIL_MSG,
		    devid);
		ret = -1;
	} else if (exit_status == FPU_TIMED_OUT) {
		/*
		 * Test hung: unbind it from its processor so it can make
		 * progress toward handling the signal, then SIGINT it and
		 * reap.  The kill/waitpid loop repeats until the child is
		 * actually reaped (blocking with WUNTRACED).
		 */
		pb_ret = processor_bind(P_PID, pid, PBIND_NONE, NULL);
		if (pb_ret == -1) {
			fpsd_message(FPSD_NO_EXIT, FPS_INFO,
			    UNBIND_FAIL_MSG,
			    strerror(errno));
		}
		(void) kill(pid, SIGINT);
		while (pid != waitpid((pid_t)pid, &status, WUNTRACED)) {
			(void) poll(NULL, 0, 10);
			(void) kill(pid, SIGINT);
		}
		fpsd_message(FPSD_NO_EXIT, FPS_INFO, FPU_TIMED_OUT_MSG, devid);
		record_failure(devid, fpu_index);
		ret = -1;
	}

	/*
	 * The following is the case if the test ended due to a
	 * signal and did not have a handler for the signal.
	 */
	if (WIFSIGNALED(status)) {
		(void) sig2str(WTERMSIG(status), sig_str);
		fpsd_message(FPSD_NO_EXIT, FPS_INFO,
		    TST_SIGNALED_MSG, devid,
		    frequency, sig_str);
		record_failure(devid, fpu_index);
		ret = -1; /* Retry */
	}

	/* Log elapsed wall-clock time as h:m:s.ms for this invocation */
	end_hrtime = gethrtime();
	hrmsecs = ((end_hrtime - start_hrtime)/
	    ((hrtime_t)1000*1000));
	hrsecs = hrmsecs / 1000;
	fpsd_message(FPSD_NO_EXIT, FPS_INFO, END_TST_MSG, (int)pid,
	    (int)(hrsecs/(60*60)),
	    (int)((hrsecs%3600)/60),
	    (int)(hrsecs%60),
	    (int)(hrmsecs%1000),
	    cpuid_c);

	fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, EXIT_STAT_MSG, exit_status);

	return (ret);
}

/*
 * The test scheduling thread.
 */

void *
test_fpu_thr(/* ARGSUSED */ void *arg)
{
	time_t cur = 0,	/* current time in secs */
	    last = 0;	/* Last time this level testing done in secs */
	int ret;

	int intvl = 0;	/* interval */
	unsigned tswap = 0;
	int poll_intvl;
	long num_cpus;
	int idle = 0, remain = 0, max_remain = 0;
	time_t last_wakeup = 0, wakeup_elapse;
	int fpuid;
	int frequency;
	int group_no;

	int force_skip_test_if_pm_idle = 1;
	int fpu_index;
	int max_idle_time_4_tst_run;
	int j;

	/*
	 * If enabled, do not run test on idle system, even if test intvl
	 * explicitly specified.
	 */

	/*
	 * Minimum time to wait before scheduling tests
	 * when the system just wakes up from sleep.
	 */
#define	MINSLEEP	8

	num_cpus = fpsd.d_conf->m_num_on_fpuids;

	intvl = poll_intvl = fpsd.d_interval;

	tswap = FPS_LOWTST_SWAP;

	cur = time(NULL);

	/*
	 * Initialize last time test done based on earlier bootup testing.
	 * This decides when the first time scheduling of the test is
	 * to be done.
	 */

	/*
	 * In systems with less than 3 processors, the initial testing
	 * has been found to affect the system bootup time.
	 * Wait for 5 min for those systems before starting any testing.
	 */

	if (num_cpus < 3)
		fps_wait_secs(boot_tst_delay);

	/* Soft bind once before starting test. */
	if (processor_bind(P_PID, P_MYID, PBIND_SOFT, NULL) != 0) {
		fpsd_message(FPSD_EXIT_ERROR, FPS_WARNING, SYSTEM_CALL_FAIL,
		    "processor_bind", strerror(errno));
	}

	if (pset_bind(PS_SOFT, P_PID, P_MYID, NULL) != 0) {
		fpsd_message(FPSD_EXIT_ERROR, FPS_WARNING, SYSTEM_CALL_FAIL,
		    "pset_bind", strerror(errno));
	}

#define	MAX_IDLE_TIME_FOR_TSTRUN	10

	/*
	 * Compute the idle-time threshold below which the system is
	 * considered "busy" enough to run a test (see the long E*
	 * discussion further down in this function).
	 */
	if (intvl/2 > MAX_IDLE_TIME_FOR_TSTRUN) {
		max_idle_time_4_tst_run =
		    MAX_IDLE_TIME_FOR_TSTRUN;
	} else {
		max_idle_time_4_tst_run =
		    (intvl/2) +
		    MAX_TEST_RUN_TIME;
	}

	cur = time(NULL);
	last = 0; /* Force the invocation by setting last to zero. */


	for (;;) {
		time_t elapse;

		cur = time(NULL);
		elapse = cur - last;

		/*
		 * Sleep until a full intvl has elapsed since the last
		 * invocation.  poll() with no fds is just a sleep; if
		 * it is interrupted early we simply loop and recompute
		 * the remaining time.
		 */

		if (elapse >= (time_t)intvl)
			poll_intvl = 0;
		else	/* Sleep for the remainder of the interval */
			poll_intvl = (int)((time_t)intvl-elapse);

		/*
		 * Until poll_intvl becomes zero, sleep.
		 * If poll gets interrupted for any reason, then also works.
		 */

		if (poll_intvl > 0) {
			(void) poll(NULL, 0, poll_intvl*1000);
			continue;
		}

/*
 * INVOKE_PROG picks the next fpu, runs the test on it, and retries up
 * to MAX_RETRIES times on failure.  NOTE: it expands to a block that
 * contains "continue" statements, so it is only valid inside this
 * for (;;) loop.
 */
#define	INVOKE_PROG { \
	fpuid = identify_fpu_to_run_test(&frequency, &group_no, &fpu_index);\
	if (intvl != fpsd.d_interval) { \
		/* \
		 * Interval has changed due to change in \
		 * online processors/ config properties. \
		 */ \
		intvl = fpsd.d_interval; \
		fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, \
		    INTVL_CHANGED_MSG, intvl); \
	} \
	if (fpuid == -1) {\
		/* Testing could not be done on any cpu */\
		fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, \
		    INVALID_FPU_ID); \
		last = time(NULL); \
		continue;\
	}\
	ret = check_invoke_prog(fpuid, &last, tswap, frequency, \
	    group_no, fpu_index); \
	if (ret == -1) { \
		for (j = 0; (j < MAX_RETRIES) && (ret != 1); j++) { \
			(void) poll(NULL, 0, RETRY_INTVL); \
			fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, \
			    RESCHEDULE_MSG, fpuid);\
			ret = check_invoke_prog(fpuid, &last, tswap, \
			    frequency, group_no, fpu_index); \
		} \
		if (ret == -1) { \
			/*\
			 * Tried MAX_RETRIES times. Still seeing failures\
			 * on this fpu. Skip this iteration and move on.\
			 */\
			fpsd.d_iteration++; \
		} \
	} \
}

		/*
		 * If power management is disabled (or not supported) on the
		 * system, just go ahead, invoke the program.
		 */
		update_pm_state(); /* Update current PM state. */
		if (sys_pm_state != PM_SYSTEM_PM_ENABLED) {
			/* autopm disabled. Just go ahead invoke program. */
			INVOKE_PROG
			continue;
		}

		/*
		 * Power management is enabled. This system may be CPU PM
		 * enabled system or just disk(and other) PM enabled.
		 * If CPU PM not supported, just invoke the program.
		 */
		if (!is_estar_system) {
			INVOKE_PROG
			continue;
		}

		/* This system is CPU PM currently supported & enabled. */

		/*
		 * By default, tests are not invoked on E* compliant system.
		 * However if force_skip_test_if_pm_idle is set to 0, tests
		 * will be invoked. This is kept for debugging purposes for now.
		 * Should be removed if no use cases.
		 */

		if (!force_skip_test_if_pm_idle) {
			INVOKE_PROG
			continue;
		}

		/*
		 * If the system is in sleep mode, wait until it comes
		 * to full power mode.
		 */

		/* If CPUs are not in full power mode, this will return -1 */
		ret = get_idle_rem_stats(&idle, &remain, &max_remain);

		/*
		 * Wait until CPU comes to full power mode.
		 * Call wait for state change function -- the return from the
		 * function does not guarantee that the system is in full power
		 * mode. So get the current status later as well.
		 */
		if (ret == -1) {
			while (ret == -1) {
				/* Avoid busy loop in any case */
				(void) poll(NULL, 0, 1000);
				/* Wait until CPU comes to full pwr */
				wait_for_pm_state_change();
				ret = get_idle_rem_stats(&idle, &remain,
				    &max_remain);
			}

			/* Remember the last time that we woke up. */
			last_wakeup = time(NULL);
		}

		/*
		 * To meet E* requirements, the system should go to
		 * deep sleep mode in 30 mins on default configs.
		 * The CPU power management does this by 14.25min+14.25min
		 * so total 28.5mins. (in sleep mode followed by deep sleep).
		 * Running the test as the system just becomes active,
		 * may reset the idle counter and may delay the transition.
		 * However since we have 1.5 mins cushion to meet E*
		 * requirements, we are just making use of it.
		 *
		 * If system is idle for more than 10 seconds, wait
		 * until the system idle time is less than 10 seconds.
		 * Poll in 2 sec interval, so we will catch it as soon
		 * as the system idle time goes low (as it just becomes busy).
		 * Basically don't run test on an idle system.
		 * If the system is continuously busy, then this will
		 * result in continuously scheduling the test.
		 *
		 * Running test on a system which is just 10 seconds idle,
		 * may reset the idle counter.
		 * This will postpone the idle transition to it's lowest power
		 * by worst case of 10 secs + worst case run time for fptest
		 * that is approximately 1 sec = 11 sec.
		 * This is below the 1.5mins cushion CPU PM now has to make
		 * idle transition.
		 *
		 * So if d_interval/2 >= 10 follow the above logic. Else, reduce
		 * max_idle_time_4_tstrun = d_interval/2 + max_time_taken_by_test
		 * (which is <= 1s). We want to be conservative in scheduling
		 * test rather than utilize the cushion to maximum possible
		 * extent.
		 * Note: The E* desktop systems have at most 2 processors, but
		 * this will work even for more processors in which case the
		 * interval will be less or if the interval is configured thro'
		 * SMF.
		 * As long as at least any one processor is in full power mode,
		 * all processors have to be in same power level.
		 */

		/* Invoke program if system is "busy" */

		if (idle <= max_idle_time_4_tst_run) {
			/*
			 * If the system is just waking up from sleep, don't rush into
			 * testing immediately to avoid hiccups in performance.
			 *
			 */
			wakeup_elapse = time(NULL) - last_wakeup;
			if (wakeup_elapse < MINSLEEP) {
				fps_wait_secs((int)(MINSLEEP-wakeup_elapse));
			}
			INVOKE_PROG
			continue;
		}

		/* The system is "idle". Wait until it becomes "busy" */
		while (idle > max_idle_time_4_tst_run) {

			/*
			 * Once in max_idle_time_4_tst_run/2 secs, we are issuing
			 * ioctl call to catch the system as soon as it becomes
			 * "busy". Polling is not an efficient way to do this,
			 * but this is the only way we got right now.
			 */
			fps_wait_secs(max_idle_time_4_tst_run / 2);
			ret = get_idle_rem_stats(&idle, &remain, &max_remain);
			if (ret == -1) break; /* In case now in sleep mode */
		}
		continue;

	} /* End infinite for loop */

#pragma error_messages(off, E_STATEMENT_NOT_REACHED)
	/* NOTREACHED */
	return (NULL);
}

/*
 * get_num_onln_cpus(): returns the number of processors that are in
 * "on-line" state only. This number will be less than the number
 * returned by sysconf(_SC_NPROCESSORS_ONLN) if there are some
 * processors in "no-intr" state.
 */

static int
get_num_onln_cpus()
{
	int i;
	int num_onln = 0;
	int total_onln = sysconf(_SC_NPROCESSORS_ONLN);

	/*
	 * Probe cpuids 0..m_max_cpuid-1; ids with no processor simply
	 * fail the P_ONLINE check.
	 * NOTE(review): the bound "i < m_max_cpuid" never probes
	 * cpuid == m_max_cpuid itself -- confirm whether m_max_cpuid is
	 * an exclusive bound or this is an off-by-one.
	 */
	for (i = 0; i < fpsd.d_conf->m_max_cpuid; i++) {
		if (p_online(i, P_STATUS) == P_ONLINE) {
			num_onln++;
		}
		if (num_onln == total_onln) {
			/* Break after all onln cpuids found */
			break;
		}
	}
	return (num_onln);
}

/*
 * Identifies the fpu on which test will be scheduled next.
 *
 * Walks m_cpus[] round-robin (via the global cursor fpsd.d_fpuid_index),
 * skipping disabled fpus and fpus that have completed their pass, and
 * reprobes the configuration when the online-cpu count changes or a
 * SIGHUP has been received since the last call.
 *
 * On success returns the cpuid and fills in *freq (cpu frequency),
 * *iteration (iteration no. ==> matrix size) and *fpu_index (index into
 * m_cpus[]).  Returns -1 when no testable fpu exists.
 */

static int
identify_fpu_to_run_test(int *freq, int *iteration, int *fpu_index)
{
	int fpuid = -1;
	int ascend;
	int tmp_iter;
	fps_cpu_t fps_cpu;
	int i;
	int num_onln;
	/* Timestamp at which SIGHUP ts was checked last */
	static hrtime_t ts_hup_chkd = 0;
	hrtime_t tmp_ts;

	*iteration = *freq = 0;
	while (fpuid == -1) {
		/* Check if the number of online processors has changed */
		num_onln = get_num_onln_cpus();
		if (num_onln != fpsd.d_conf->m_num_on_fpuids) {
			fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, REPROBE_MSG);
			fpsd.d_conf->m_reprobe = 1;
		}

		/*
		 * Snapshot d_ts_hup first so a SIGHUP arriving between
		 * the comparison and the store is caught on the next
		 * pass rather than lost.
		 * NOTE(review): d_ts_hup is presumably written from the
		 * SIGHUP handler/another thread -- confirm the access
		 * is adequately synchronized.
		 */
		tmp_ts = fpsd.d_ts_hup;
		if (fpsd.d_ts_hup > ts_hup_chkd) {
			fpsd.d_conf->m_reprobe = 1;
		}
		ts_hup_chkd = tmp_ts;

		if (1 == fpsd.d_conf->m_reprobe) {
			fpsd_read_config();
		}
		fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, IDENTIFY_FPU_MSG,
		    fpsd.d_fpuid_index, fpsd.d_iteration,
		    fpsd.d_conf->total_iter, fpsd.d_conf->m_cpuids_size);
		if (fpsd.d_iteration == fpsd.d_conf->total_iter) {
			/* One pass completed */
			fpsd.d_iteration = 0;

			/*
			 * Reinit iterations: ascending fpus restart
			 * below their first iteration (0), descending
			 * fpus restart above their last
			 * (total_iterations + 1); the pre-in/decrement
			 * below lands on the first real value.
			 */
			for (i = 0; i < fpsd.d_conf->m_cpuids_size; i++) {
				if (fpsd.d_conf->m_cpus[i].disable_test)
					continue;
				ascend = fpsd.d_conf->m_cpus[i].asc;
				if (ascend) {
					fpsd.d_conf->m_cpus[i].previous_iteration = 0;
				} else {
					fpsd.d_conf->m_cpus[i].previous_iteration =
					    fpsd.d_conf->m_cpus[i].total_iterations + 1;
				}
			}
		}
		if (fpsd.d_iteration == 0) { /* Beginning of one pass */
			/* Start from the first non-disabled fpu */
			fpsd.d_fpuid_index = 0;
			while (fpsd.d_fpuid_index <
			    fpsd.d_conf->m_cpuids_size) {
				if (fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].
				    disable_test) {
					fpsd.d_fpuid_index++;
				} else {
					break;
				}
			}
			if (fpsd.d_fpuid_index == fpsd.d_conf->m_cpuids_size) {
				/* Every fpu is disabled */
				return (-1);
			}
		} else {
			if (fpsd.d_fpuid_index ==
			    (fpsd.d_conf->m_cpuids_size-1)) {
				/* One iteration done for all fpus */
				fpsd.d_fpuid_index = 0;
			} else {
				fpsd.d_fpuid_index++;
			}
		}
		fps_cpu = fpsd.d_conf->m_cpus[fpsd.d_fpuid_index];
		fpuid = fps_cpu.cpuid;
		if (fps_cpu.disable_test) {
			fpuid = -1;	/* Skip; loop to the next fpu */
			continue;
		}
		*freq = fps_cpu.frequency;

		/* Find the iteration no. */
		tmp_iter = fps_cpu.previous_iteration;
		ascend = fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].asc;
		if (ascend) {
			if (tmp_iter == fps_cpu.total_iterations) {
				/*
				 * 1 pass completed for this fpu;
				 * skip this fpu and goto the next fpu
				 */
				fpuid = -1;
				continue;
			} else {
				fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].
				    previous_iteration++;
			}
		} else {
			/* This FPU is tested in descending order of */
			/* iteration no. ==> matrix size */
			if (tmp_iter == 1) {
				/*
				 * 1 pass completed for this fpu;
				 * skip this fpu and goto the next fpu
				 */
				fpuid = -1;
				continue;
			} else {
				fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].
				    previous_iteration--;
			}
		}
		*iteration =
		    fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].previous_iteration;
		*fpu_index = fpsd.d_fpuid_index;
		fpsd_message(FPSD_NO_EXIT, FPS_DEBUG, IDENTIFY_FPU_RTN_MSG,
		    fpuid, *iteration, *freq,
		    fpsd.d_conf->m_cpus[fpsd.d_fpuid_index].
		    previous_iteration,
		    fps_cpu.total_iterations);
	}
	return (fpuid);
}