/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "config.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>

#ifdef HAVE_SYSINFO
#include <sys/sysinfo.h>
#endif

#ifdef HAVE_LIBKSTAT
#include <kstat.h>
#include <sys/cpuvar.h>
#endif /* HAVE_LIBKSTAT */

#include <stdarg.h>

#include "filebench.h"
#include "flowop.h"
#include "vars.h"
#include "stats.h"

/*
 * A set of routines for collecting and dumping various filebench
 * run statistics.
 */

/* Global statistics */
static flowstat_t *globalstats = NULL;

static hrtime_t stats_cputime = 0;

#ifdef HAVE_LIBKSTAT
static kstat_ctl_t *kstatp = NULL;
static kstat_t *sysinfo_ksp = NULL;

/*
 * Uses the kstat library or, if it is not available, the /proc/stat file
 * to obtain cpu statistics. Collects statistics for each cpu, initializes
 * a local pointer to the sysinfo kstat, and returns the sum of user and
 * kernel time for all the cpus.
 */
static fbint_t
kstats_read_cpu(void)
{
        int ncpus;
        kstat_t **cpu_stat_list = NULL;
        ulong_t cputime_states[CPU_STATES];
        hrtime_t cputime;
        int i;

        kstat_t *ksp;

        if (kstatp == NULL) {
                if ((kstatp = kstat_open()) == (kstat_ctl_t *)NULL) {
                        filebench_log(LOG_ERROR, "Cannot read kstats");
                        return (-1);
                }
        }

        /*
         * Per-CPU statistics
         */

        ncpus = 0;
        for (ksp = kstatp->kc_chain; ksp; ksp = ksp->ks_next)
                if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0)
                        ncpus++;

        if ((cpu_stat_list =
            (kstat_t **)malloc(ncpus * sizeof (kstat_t *))) == NULL) {
                filebench_log(LOG_ERROR, "malloc failed");
                return (-1);
        }

        ncpus = 0;
        for (ksp = kstatp->kc_chain; ksp; ksp = ksp->ks_next)
                if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0 &&
                    kstat_read(kstatp, ksp, NULL) != -1)
                        cpu_stat_list[ncpus++] = ksp;

        if (ncpus == 0) {
                filebench_log(LOG_ERROR,
                    "kstats can't find any cpu statistics");
                return (0);
        }

        if (sysinfo_ksp == NULL)
                sysinfo_ksp = kstat_lookup(kstatp, "unix", 0, "sysinfo");

        /* Sum across all CPUs */
        (void) memset(&cputime_states, 0, sizeof (cputime_states));
        for (i = 0; i < ncpus; i++) {
                cpu_stat_t cpu_stats;
                int j;

                (void) kstat_read(kstatp, cpu_stat_list[i],
                    (void *) &cpu_stats);
                for (j = 0; j < CPU_STATES; j++)
                        cputime_states[j] += cpu_stats.cpu_sysinfo.cpu[j];
        }

        cputime = cputime_states[CPU_KERNEL] + cputime_states[CPU_USER];

        return (10000000LL * cputime);
}
#else /* HAVE_LIBKSTAT */
#ifdef HAVE_PROC_STAT
static FILE *statfd = NULL;
fbint_t
kstats_read_cpu(void)
{
        /*
         * Linux provides system wide statistics in /proc/stat.
         * The entry for cpu is
         *      cpu  1636 67 1392 208671 5407 20 12
         *      cpu0 626 8 997 104476 2499 7 7
         *      cpu1 1010 58 395 104195 2907 13 5
         *
         * The first counts are the number of jiffies (1/100ths of
         * a second) that the system spent in user mode, user mode
         * with low priority (nice), system mode, and the idle task,
         * respectively.
         */
        unsigned int user, nice, system;
        char cpu[128]; /* placeholder to read "cpu" */

        if (statfd == NULL) {
                statfd = fopen("/proc/stat", "r");
                if (statfd == NULL) {
                        filebench_log(LOG_ERROR, "Cannot open /proc/stat");
                        return (-1);
                }
        }
        if (fscanf(statfd, "%s %u %u %u", cpu, &user, &nice, &system) != 4) {
                filebench_log(LOG_ERROR, "Cannot read /proc/stat");
                return (-1);
        }
        /*
         * convert jiffies (1/100ths of a second) to nanoseconds,
         * matching the 10ms-per-tick factor used in the kstat path
         */
        return ((hrtime_t)(user + nice + system) * 10000000LL);
}

#else /* HAVE_PROC_STAT */
fbint_t
kstats_read_cpu(void)
{
        return (0);
}
#endif
#endif /* HAVE_LIBKSTAT */

/*
 * Returns the net cpu time used since the beginning of the run.
 * Just calls kstats_read_cpu() and subtracts stats_cputime, which
 * is set at the beginning of the filebench run.
 */
static hrtime_t
kstats_read_cpu_relative(void)
{
        hrtime_t cputime;

        cputime = kstats_read_cpu();
        return (cputime - stats_cputime);
}

/*
 * IO Overhead CPU is the amount of CPU that is incurred running
 * the benchmark infrastructure.
 *
 * It is computed as the sum of micro-state cpu time for each
 * thread around the op being tested.
 *
 * Overhead time is computed for each flow.
 *
 * System overhead is computed as the overhead for I/O flows
 * plus all other time running non-io related flowops.
 */
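/*
 * Worked example of the accounting above (the numbers are invented,
 * purely for illustration): if the global flow has accumulated 7s of
 * OHEAD micro-state time and 10s of CPU micro-state time, while the
 * I/O and AIO flows have accumulated 4s and 2s of CPU time, then
 * io_stats_ohead() below reports 7 + 10 - 4 - 2 = 11s of
 * infrastructure CPU, and stats_snap() later subtracts that from the
 * measured system CPU time to arrive at fs_syscpu.
 */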
/*
 * Computes and returns the overhead CPU time attributable to
 * IO type flowops.
 */
static hrtime_t
io_stats_ohead(void)
{
        flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
        flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
        flowstat_t *glstat = &globalstats[FLOW_TYPE_GLOBAL];

        filebench_log(LOG_DEBUG_NEVER,
            "Computing overhead as %llu + %llu - %llu - %llu",
            (u_longlong_t)glstat->fs_mstate[FLOW_MSTATE_OHEAD],
            (u_longlong_t)glstat->fs_mstate[FLOW_MSTATE_CPU],
            (u_longlong_t)iostat->fs_mstate[FLOW_MSTATE_CPU],
            (u_longlong_t)aiostat->fs_mstate[FLOW_MSTATE_CPU]);

        return ((glstat->fs_mstate[FLOW_MSTATE_OHEAD] +
            glstat->fs_mstate[FLOW_MSTATE_CPU] -
            iostat->fs_mstate[FLOW_MSTATE_CPU] -
            aiostat->fs_mstate[FLOW_MSTATE_CPU]));
}

/*
 * Returns the total overhead CPU time.
 */
static hrtime_t
gl_stats_ohead(void)
{
        flowstat_t *glstat = &globalstats[FLOW_TYPE_GLOBAL];

        return (glstat->fs_mstate[FLOW_MSTATE_OHEAD]);
}

/*
 * Places the value represented by "name" into the var_val.integer field of the
 * supplied var_t. Compares the supplied "name" with a set of predefined
 * names and calculates the value from the appropriate globalstats field(s).
 */
var_t *
stats_findvar(var_t *var, char *name)
{
        flowstat_t *iostat;
        flowstat_t *aiostat;
        flowstat_t *glstat;

        filebench_log(LOG_DEBUG_IMPL, "reading stats %s", name);

        /* allocate and zero the global stats table on first use */
        if (globalstats == NULL) {
                globalstats = malloc(FLOW_TYPES * sizeof (flowstat_t));
                (void) memset(globalstats, 0,
                    FLOW_TYPES * sizeof (flowstat_t));
        }

        iostat = &globalstats[FLOW_TYPE_IO];
        aiostat = &globalstats[FLOW_TYPE_AIO];
        glstat = &globalstats[FLOW_TYPE_GLOBAL];

        if (strcmp(name, "iocount") == 0) {
                fbint_t stat;

                stat = iostat->fs_count + aiostat->fs_count;
                VAR_SET_INT(var, stat);
                filebench_log(LOG_DEBUG_IMPL, "reading stats %s = %llu",
                    name, (u_longlong_t)stat);
                return (var);
        }

        if (strcmp(name, "iorate") == 0) {
                fbint_t stat;

                /* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
                stat = (iostat->fs_count + aiostat->fs_count) /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
                VAR_SET_INT(var, stat);
                return (var);
        }

        if (strcmp(name, "ioreadrate") == 0) {
                fbint_t stat;

                /* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
                stat = (iostat->fs_rcount + aiostat->fs_rcount) /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
                VAR_SET_INT(var, stat);
                return (var);
        }

        if (strcmp(name, "iowriterate") == 0) {
                fbint_t stat;

                /* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
                stat = (iostat->fs_wcount + aiostat->fs_wcount) /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
                VAR_SET_INT(var, stat);
                return (var);
        }

        if (strcmp(name, "iobandwidth") == 0) {
                fbint_t stat;

                /* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
                stat =
                    ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
                VAR_SET_INT(var, stat);
                return (var);
        }

        if (strcmp(name, "iolatency") == 0) {
                fbint_t stat;

                stat = iostat->fs_count ?
                    iostat->fs_mstate[FLOW_MSTATE_LAT] /
                    (iostat->fs_count * 1000UL) : 0;
                VAR_SET_INT(var, stat);
                return (var);
        }

        if (strcmp(name, "iocpu") == 0) {
                fbint_t stat;

                stat = (iostat->fs_count + aiostat->fs_count) ?
                    (iostat->fs_mstate[FLOW_MSTATE_CPU] +
                    aiostat->fs_mstate[FLOW_MSTATE_CPU]) / ((iostat->fs_count +
                    aiostat->fs_count) * 1000UL) : 0;
                VAR_SET_INT(var, stat);
                return (var);
        }

        if (strcmp(name, "oheadcpu") == 0) {
                fbint_t stat;

                stat = (iostat->fs_count + aiostat->fs_count) ?
                    io_stats_ohead() / ((iostat->fs_count +
                    aiostat->fs_count) * 1000UL) : 0;
                VAR_SET_INT(var, stat);
                return (var);
        }

        if (strcmp(name, "iowait") == 0) {
                fbint_t stat;

                stat = iostat->fs_count ?
                    iostat->fs_mstate[FLOW_MSTATE_WAIT] /
                    (iostat->fs_count * 1000UL) : 0;
                VAR_SET_INT(var, stat);
                return (var);
        }

        if (strcmp(name, "syscpu") == 0) {
                fbint_t stat;

                /* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
                stat = glstat->fs_syscpu / 1000.0;
                VAR_SET_INT(var, stat);
                return (var);
        }

        if (strcmp(name, "iocpusys") == 0) {
                fbint_t stat;

                stat = (iostat->fs_count + aiostat->fs_count) ?
                    iostat->fs_syscpu / ((iostat->fs_count +
                    aiostat->fs_count) * 1000UL) : 0;

                VAR_SET_INT(var, stat);
                return (var);
        }

        filebench_log(LOG_DEBUG_IMPL,
            "error reading stats %s", name);

        return (NULL);
}

/*
 * Initializes the static variable "stats_cputime" with the
 * current cpu time, for use by kstats_read_cpu_relative.
 */
void
stats_init(void)
{
#if defined(HAVE_LIBKSTAT) || defined(LINUX_PORT)
        stats_cputime = kstats_read_cpu();
#else
        stats_cputime = 0;
#endif /* HAVE_LIBKSTAT */
}

/*
 * Add a flowstat b to a, leave sum in a.
 */
static void
stats_add(flowstat_t *a, flowstat_t *b)
{
        int i;

        a->fs_count += b->fs_count;
        a->fs_rcount += b->fs_rcount;
        a->fs_wcount += b->fs_wcount;
        a->fs_bytes += b->fs_bytes;
        a->fs_rbytes += b->fs_rbytes;
        a->fs_wbytes += b->fs_wbytes;

        for (i = 0; i < FLOW_MSTATES; i++)
                a->fs_mstate[i] += b->fs_mstate[i];
}

/*
 * Takes a "snapshot" of the global statistics. Actually, it calculates
 * them from the local statistics maintained by each flowop.
 * First the routine pauses filebench, then rolls the statistics for
 * each flowop into its associated FLOW_MASTER flowop.
 * Next all the FLOW_MASTER flowops' statistics are written
 * to the log file followed by the global totals. Then filebench
 * operation is allowed to resume.
 */
void
stats_snap(void)
{
        flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
        flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
        flowstat_t *glstat = &globalstats[FLOW_TYPE_GLOBAL];
        hrtime_t cputime;
        flowop_t *flowop;
        char *str;

        if (globalstats == NULL) {
                filebench_log(LOG_ERROR,
                    "'stats snap' called before 'stats clear'");
                return;
        }

        /* don't print out if run ended in error */
        if (filebench_shm->shm_f_abort == FILEBENCH_ABORT_ERROR) {
                filebench_log(LOG_ERROR,
                    "NO VALID RESULTS! FileBench run terminated prematurely");
                return;
        }

        globalstats->fs_etime = gethrtime();

        filebench_log(LOG_DEBUG_SCRIPT, "Stats period = %ds",
            (int)((globalstats->fs_etime - globalstats->fs_stime) /
            1000000000));

        /* Freeze statistics during update */
        filebench_shm->shm_bequiet = 1;

        flowop = filebench_shm->shm_flowoplist;
        while (flowop) {
                flowop_t *flowop_master;

                if (flowop->fo_instance <= FLOW_DEFINITION) {
                        flowop = flowop->fo_next;
                        continue;
                }

                flowop_master = flowop_find_one(flowop->fo_name, FLOW_MASTER);

                /* Roll up per-flowop into global stats */
                stats_add(&globalstats[flowop->fo_type],
                    &flowop->fo_stats);
                stats_add(&globalstats[FLOW_TYPE_GLOBAL],
                    &flowop->fo_stats);

                if (flowop_master && IS_FLOW_ACTIVE(flowop)) {
                        flowop_master->fo_stats.fs_active++;
                }

                if (flowop_master) {
                        /* Roll up per-flow stats into master */
                        flowop_master->fo_stats.fs_children++;
                        stats_add(&flowop_master->fo_stats, &flowop->fo_stats);
                } else {
                        filebench_log(LOG_DEBUG_NEVER,
                            "flowop_stats could not find %s",
                            flowop->fo_name);
                }

                filebench_log(LOG_DEBUG_SCRIPT,
                    "flowop %-20s-%4d - %5d ops, %5.1lf ops/s %5.1lfmb/s "
                    "%8.3fms/op",
                    flowop->fo_name,
                    flowop->fo_instance,
                    flowop->fo_stats.fs_count,
                    flowop->fo_stats.fs_count /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
                    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
                    flowop->fo_stats.fs_count ?
                    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
                    (flowop->fo_stats.fs_count * 1000000.0) : 0);

                flowop = flowop->fo_next;
        }

#if defined(HAVE_LIBKSTAT) || defined(LINUX_PORT)
        cputime = kstats_read_cpu_relative();
#else
        cputime = 0;
#endif /* HAVE_LIBKSTAT */

        filebench_log(LOG_DEBUG_IMPL,
            "cputime = %llu, ohead = %llu",
            (u_longlong_t)(cputime / 1000000000),
            (u_longlong_t)(io_stats_ohead() / 1000000000));
        iostat->fs_syscpu =
            (cputime > io_stats_ohead()) ?
            (cputime - io_stats_ohead()) : 0;
        glstat->fs_syscpu =
            (cputime > gl_stats_ohead()) ?
            (cputime - gl_stats_ohead()) : 0;

        flowop = filebench_shm->shm_flowoplist;
        str = malloc(1048576);
        *str = '\0';
        (void) strcpy(str, "Per-Operation Breakdown\n");
        while (flowop) {
                char line[1024];

                if (flowop->fo_instance != FLOW_MASTER) {
                        flowop = flowop->fo_next;
                        continue;
                }

                (void) snprintf(line, sizeof (line), "%-20s %8.0lfops/s "
                    "%5.1lfmb/s %8.1fms/op %8.0fus/op-cpu\n",
                    flowop->fo_name,
                    flowop->fo_stats.fs_count /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
                    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
                    flowop->fo_stats.fs_count ?
                    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
                    (flowop->fo_stats.fs_count * 1000000.0) : 0,
                    flowop->fo_stats.fs_count ?
                    flowop->fo_stats.fs_mstate[FLOW_MSTATE_CPU] /
                    (flowop->fo_stats.fs_count * 1000.0) : 0);
                (void) strcat(str, line);

                flowop = flowop->fo_next;
        }

        filebench_log(LOG_INFO, "%s", str);
        free(str);

        filebench_log(LOG_INFO,
            "\nIO Summary: %5d ops %5.1lf ops/s, (%0.0lf/%0.0lf r/w) "
            "%5.1lfmb/s, %6.0fus cpu/op, %5.1fms latency",
            iostat->fs_count + aiostat->fs_count,
            (iostat->fs_count + aiostat->fs_count) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
            (iostat->fs_rcount + aiostat->fs_rcount) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
            (iostat->fs_wcount + aiostat->fs_wcount) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
            ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
            (iostat->fs_rcount + iostat->fs_wcount +
            aiostat->fs_rcount + aiostat->fs_wcount) ?
            (iostat->fs_syscpu / 1000.0) /
            (iostat->fs_rcount + iostat->fs_wcount +
            aiostat->fs_rcount + aiostat->fs_wcount) : 0,
            (iostat->fs_rcount + iostat->fs_wcount) ?
            iostat->fs_mstate[FLOW_MSTATE_LAT] /
            ((iostat->fs_rcount + iostat->fs_wcount) * 1000000.0) : 0);

        filebench_shm->shm_bequiet = 0;
}

/*
 * Dumps the per-operation statistics and global statistics to the dump file.
 */
void
stats_dump(char *filename)
{
        flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
        flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
        flowop_t *flowop;

        /* don't dump stats if run ended in error */
        if (filebench_shm->shm_f_abort == FILEBENCH_ABORT_ERROR)
                return;

        (void) strcpy(filebench_shm->shm_dump_filename, filename);

        filebench_log(LOG_INFO, "in statsdump %s", filename);

        if (filebench_shm->shm_dump_fd > 0) {
                (void) close(filebench_shm->shm_dump_fd);
                filebench_shm->shm_dump_fd = -1;
        }

        filebench_log(LOG_DUMP, "Flowop totals:");

        flowop = filebench_shm->shm_flowoplist;
        while (flowop) {

                if (flowop->fo_instance != FLOW_MASTER) {
                        flowop = flowop->fo_next;
                        continue;
                }

                filebench_log(LOG_DUMP,
                    "%-20s %8.0lfops/s %5.1lfmb/s "
                    "%8.1fms/op %8.0fus/op-cpu",
                    flowop->fo_name,
                    flowop->fo_stats.fs_count /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
                    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
                    flowop->fo_stats.fs_count ?
                    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
                    (flowop->fo_stats.fs_count * 1000000.0) : 0,
                    flowop->fo_stats.fs_count ?
                    flowop->fo_stats.fs_mstate[FLOW_MSTATE_CPU] /
                    (flowop->fo_stats.fs_count * 1000.0) : 0);

                flowop = flowop->fo_next;
        }

        filebench_log(LOG_DUMP, "");
        filebench_log(LOG_DUMP,
            "IO Summary: %8d ops %8.1lf ops/s, %8.0lf/%0.0lf r/w"
            "%8.1lfmb/s, %8.0fuscpu/op",
            iostat->fs_count + aiostat->fs_count,
            (iostat->fs_count + aiostat->fs_count) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
            (iostat->fs_rcount + aiostat->fs_rcount) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
            (iostat->fs_wcount + aiostat->fs_wcount) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
            ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
            (iostat->fs_rcount + iostat->fs_wcount +
            aiostat->fs_rcount + aiostat->fs_wcount) ?
            (iostat->fs_syscpu / 1000.0) /
            (iostat->fs_rcount + iostat->fs_wcount + aiostat->fs_rcount +
            aiostat->fs_wcount) : 0);
}

/*
 * Same as stats_dump, but in xml format.
 */
void
stats_xmldump(char *filename)
{
        flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
        flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
        flowop_t *flowop;

        /* don't dump stats if run ended in error */
        if (filebench_shm->shm_f_abort == FILEBENCH_ABORT_ERROR)
                return;

        (void) strcpy(filebench_shm->shm_dump_filename, filename);

        if (filebench_shm->shm_dump_fd > 0) {
                (void) close(filebench_shm->shm_dump_fd);
                filebench_shm->shm_dump_fd = -1;
        }

        filebench_log(LOG_DUMP, "<stat_doc name=\"Filebench Workload\">");
        filebench_log(LOG_DUMP, "<stat_group name=\"Flowop totals\">");
        filebench_log(LOG_DUMP, "<cell_list>");

        flowop = filebench_shm->shm_flowoplist;
        while (flowop) {
                if (flowop->fo_instance != FLOW_MASTER) {
                        flowop = flowop->fo_next;
                        continue;
                }

                filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
                    flowop->fo_stats.fs_count /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
                filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
                    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
                    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
                filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
                    flowop->fo_stats.fs_count ?
                    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
                    (flowop->fo_stats.fs_count * 1000000.0) : 0);
                filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
                    flowop->fo_stats.fs_count ?
                    flowop->fo_stats.fs_mstate[FLOW_MSTATE_CPU] /
                    (flowop->fo_stats.fs_count * 1000.0) : 0);

                flowop = flowop->fo_next;
        }
        filebench_log(LOG_DUMP, "</cell_list>");

        filebench_log(LOG_DUMP, "<dim_list>");
        filebench_log(LOG_DUMP, "<dim>");
        filebench_log(LOG_DUMP, "<dimval>Operations/sec</dimval>");
        filebench_log(LOG_DUMP, "<dimval>MB/sec</dimval>");
        filebench_log(LOG_DUMP, "<dimval>Latency (ms per operation)</dimval>");
        filebench_log(LOG_DUMP, "<dimval>CPU (us per operation)</dimval>");
        filebench_log(LOG_DUMP, "</dim>");

        filebench_log(LOG_DUMP, "<dim>");
        flowop = filebench_shm->shm_flowoplist;
        while (flowop) {
                if (flowop->fo_instance != FLOW_MASTER) {
                        flowop = flowop->fo_next;
                        continue;
                }
                filebench_log(LOG_DUMP, "<dimval>%s</dimval>", flowop->fo_name);
                flowop = flowop->fo_next;
        }
        filebench_log(LOG_DUMP, "</dim>");
        filebench_log(LOG_DUMP, "</dim_list>");
        filebench_log(LOG_DUMP, "</stat_group>");

        filebench_log(LOG_DUMP, "<stat_group name=\"IO Summary\">");
        filebench_log(LOG_DUMP, "<cell_list>");
        filebench_log(LOG_DUMP, "<cell>%0d</cell>",
            iostat->fs_count + aiostat->fs_count);
        filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
            (iostat->fs_count + aiostat->fs_count) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
        filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
            (iostat->fs_rcount + aiostat->fs_rcount) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
        filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
            (iostat->fs_wcount + aiostat->fs_wcount) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
        filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
            ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
            ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
        filebench_log(LOG_DUMP, "<cell>%0.0f</cell>",
            (iostat->fs_rcount + iostat->fs_wcount + aiostat->fs_rcount +
            aiostat->fs_wcount) ? (iostat->fs_syscpu / 1000.0) /
            (iostat->fs_rcount + iostat->fs_wcount + aiostat->fs_rcount +
            aiostat->fs_wcount) : 0);
        filebench_log(LOG_DUMP, "</cell_list>");

        filebench_log(LOG_DUMP, "<dim_list>");
        filebench_log(LOG_DUMP, "<dim>");
        filebench_log(LOG_DUMP, "<dimval>Operations</dimval>");
        filebench_log(LOG_DUMP, "<dimval>Operations/sec</dimval>");
        filebench_log(LOG_DUMP, "<dimval>Reads</dimval>");
        filebench_log(LOG_DUMP, "<dimval>Writes</dimval>");
        filebench_log(LOG_DUMP, "<dimval>MB/sec</dimval>");
        filebench_log(LOG_DUMP, "<dimval>CPU (us per operation)</dimval>");
        filebench_log(LOG_DUMP, "</dim>");

        filebench_log(LOG_DUMP, "<dim>");
        filebench_log(LOG_DUMP, "<dimval>IO Summary</dimval>");
        filebench_log(LOG_DUMP, "</dim>");
        filebench_log(LOG_DUMP, "</dim_list>");
        filebench_log(LOG_DUMP, "</stat_group>");
        filebench_log(LOG_DUMP, "</stat_doc>");
}

/*
 * Clears all the statistics variables (fo_stats) for every defined flowop.
 * It also creates a global flowstat table if one doesn't already exist and
 * clears it.
 */
void
stats_clear(void)
{
        flowop_t *flowop;

#if defined(HAVE_LIBKSTAT) || defined(LINUX_PORT)
        stats_cputime = kstats_read_cpu();
#else
        stats_cputime = 0;
#endif /* HAVE_LIBKSTAT */

        if (globalstats == NULL)
                globalstats = malloc(FLOW_TYPES * sizeof (flowstat_t));

        (void) memset(globalstats, 0, FLOW_TYPES * sizeof (flowstat_t));

        flowop = filebench_shm->shm_flowoplist;

        while (flowop) {
                filebench_log(LOG_DEBUG_IMPL, "Clearing stats for %s-%d",
                    flowop->fo_name,
                    flowop->fo_instance);
                (void) memset(&flowop->fo_stats, 0, sizeof (flowstat_t));
                flowop = flowop->fo_next;
        }

        (void) memset(globalstats, 0, sizeof (flowstat_t));
        globalstats->fs_stime = gethrtime();
}
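/*
 * Usage sketch (illustrative only; the real call sites live in the
 * filebench parser, so the exact sequence and the file names below are
 * assumptions of this comment, not part of this file): the interfaces
 * above are intended to be driven roughly in the order
 *
 *      stats_init();                   -- once, to baseline stats_cputime
 *      stats_clear();                  -- zero counters, record fs_stime
 *      ...run the workload...
 *      stats_snap();                   -- roll up per-flowop stats, log summary
 *      stats_dump("stats.out");        -- plain-text dump via LOG_DUMP
 *      stats_xmldump("stats.xml");     -- same data in XML form
 *
 * as suggested by the "'stats snap' called before 'stats clear'" check
 * in stats_snap() above.
 */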