/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "config.h"

#include <stdio.h>
#include <fcntl.h>
#include <sys/types.h>

#ifdef HAVE_SYSINFO
#include <sys/sysinfo.h>
#endif

#ifdef HAVE_LIBKSTAT
#include <kstat.h>
#include <sys/cpuvar.h>
#endif /* HAVE_LIBKSTAT */

#include <stdarg.h>

#include "filebench.h"
#include "flowop.h"
#include "vars.h"
#include "stats.h"

/*
 * A set of routines for collecting and dumping various filebench
 * run statistics.
 */

/* Global statistics */
static flowstat_t *globalstats = NULL;

static hrtime_t stats_cputime = 0;

#ifdef HAVE_LIBKSTAT
static kstat_ctl_t *kstatp = NULL;
static kstat_t *sysinfo_ksp = NULL;

/*
 * Uses the kstat library or, if it is not available, the /proc/stat file
 * to obtain cpu statistics. Collects statistics for each cpu, initializes
 * a local pointer to the sysinfo kstat, and returns the sum of user and
 * kernel time for all the cpus.
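 *
 * The per-cpu tick counts are assumed to be in 1/100ths of a second
 * (as with the /proc/stat path below), so the 10000000 multiplier at
 * the end converts the total to nanoseconds.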
 */
static vinteger_t
kstats_read_cpu(void)
{
	int ncpus;
	kstat_t **cpu_stat_list = NULL;
	ulong_t cputime_states[CPU_STATES];
	hrtime_t cputime;
	int i;

	kstat_t *ksp;

	if (kstatp == NULL) {
		if ((kstatp = kstat_open()) == (kstat_ctl_t *)NULL) {
			filebench_log(LOG_ERROR, "Cannot read kstats");
			return (-1);
		}
	}

	/*
	 * Per-CPU statistics
	 */

	ncpus = 0;
	for (ksp = kstatp->kc_chain; ksp; ksp = ksp->ks_next)
		if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0)
			ncpus++;

	if ((cpu_stat_list =
	    (kstat_t **)malloc(ncpus * sizeof (kstat_t *))) == NULL) {
		filebench_log(LOG_ERROR, "malloc failed");
		return (-1);
	}

	ncpus = 0;
	for (ksp = kstatp->kc_chain; ksp; ksp = ksp->ks_next)
		if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0 &&
		    kstat_read(kstatp, ksp, NULL) != -1)
			cpu_stat_list[ncpus++] = ksp;

	if (ncpus == 0) {
		filebench_log(LOG_ERROR,
		    "kstats can't find any cpu statistics");
		free(cpu_stat_list);
		return (0);
	}

	if (sysinfo_ksp == NULL)
		sysinfo_ksp = kstat_lookup(kstatp, "unix", 0, "sysinfo");

	/* Sum across all CPUs */
	(void) memset(&cputime_states, 0, sizeof (cputime_states));
	for (i = 0; i < ncpus; i++) {
		cpu_stat_t cpu_stats;
		int j;

		(void) kstat_read(kstatp, cpu_stat_list[i],
		    (void *) &cpu_stats);
		for (j = 0; j < CPU_STATES; j++)
			cputime_states[j] += cpu_stats.cpu_sysinfo.cpu[j];
	}

	free(cpu_stat_list);

	cputime = cputime_states[CPU_KERNEL] + cputime_states[CPU_USER];

	return (10000000LL * cputime);
}
#else /* HAVE_LIBKSTAT */
#ifdef HAVE_PROC_STAT
static FILE *statfd = NULL;
vinteger_t
kstats_read_cpu(void)
{
	/*
	 * Linux provides system wide statistics in /proc/stat
	 * The entry for cpu is
	 * cpu  1636 67 1392 208671 5407 20 12
	 * cpu0 626 8 997 104476 2499 7 7
	 * cpu1 1010 58 395 104195 2907 13 5
	 *
	 * The number of jiffies (1/100ths of a second) that the
	 * system spent in user mode, user mode with low priority
	 * (nice), system mode, and the idle task, respectively.
	 */
	unsigned int user, nice, system;
	char cpu[128]; /* placeholder to read "cpu" */

	if (statfd == NULL) {
		statfd = fopen("/proc/stat", "r");
		if (statfd == NULL) {
			filebench_log(LOG_ERROR, "Cannot open /proc/stat");
			return (-1);
		}
	}

	/* re-read the aggregate "cpu" line from the beginning each call */
	(void) fseek(statfd, 0, SEEK_SET);

	if (fscanf(statfd, "%127s %u %u %u", cpu, &user, &nice, &system) < 0) {
		filebench_log(LOG_ERROR, "Cannot read /proc/stat");
		return (-1);
	}

	/* convert jiffies (1/100ths of a second) to nanosecs */
	return ((user + nice + system) * 10000000LL);
}

#else /* HAVE_PROC_STAT */
vinteger_t
kstats_read_cpu(void)
{
	return (0);
}
#endif
#endif /* HAVE_LIBKSTAT */

/*
 * Returns the net cpu time used since the beginning of the run.
 * Just calls kstats_read_cpu() and subtracts stats_cputime which
 * is set at the beginning of the filebench run.
 */
static hrtime_t
kstats_read_cpu_relative(void)
{
	hrtime_t cputime;

	cputime = kstats_read_cpu();
	return (cputime - stats_cputime);
}

/*
 * IO Overhead CPU is the amount of CPU that is incurred running
 * the benchmark infrastructure.
 *
 * It is computed as the sum of micro-state cpu time for each
 * thread around the op being tested.
 *
 * Overhead time is computed for each flow.
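 *
 * As a purely illustrative example: if the global flow type has
 * accumulated 12 seconds' worth of FLOW_MSTATE_OHEAD time and 30
 * seconds' worth of FLOW_MSTATE_CPU time, while the IO and AIO flow
 * types have accumulated 20 and 5 seconds' worth of FLOW_MSTATE_CPU
 * time respectively, io_stats_ohead() below reports
 * 12 + 30 - 20 - 5 = 17 seconds' worth of IO overhead CPU time.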
 *
 * System overhead is computed as the overhead for I/O flows
 * plus all other time running non-io related flowops
 *
 */

/*
 * Computes and returns the overhead CPU time attributable to
 * IO type flowops.
 */
static hrtime_t
io_stats_ohead(void)
{
	flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
	flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
	flowstat_t *glstat = &globalstats[FLOW_TYPE_GLOBAL];

	filebench_log(LOG_DEBUG_NEVER,
	    "Computing overhead as %lld + %lld - %lld - %lld",
	    glstat->fs_mstate[FLOW_MSTATE_OHEAD],
	    glstat->fs_mstate[FLOW_MSTATE_CPU],
	    iostat->fs_mstate[FLOW_MSTATE_CPU],
	    aiostat->fs_mstate[FLOW_MSTATE_CPU]);

	return ((glstat->fs_mstate[FLOW_MSTATE_OHEAD] +
	    glstat->fs_mstate[FLOW_MSTATE_CPU] -
	    iostat->fs_mstate[FLOW_MSTATE_CPU] -
	    aiostat->fs_mstate[FLOW_MSTATE_CPU]));
}

/*
 * Returns the total overhead CPU time.
 */
static hrtime_t
gl_stats_ohead(void)
{
	flowstat_t *glstat = &globalstats[FLOW_TYPE_GLOBAL];

	return (glstat->fs_mstate[FLOW_MSTATE_OHEAD]);
}

/*
 * Places the value represented by "name" into the var_integer field of the
 * supplied var_t. Compares the supplied "name" with a set of predefined
 * names and calculates the value from the appropriate globalstats field(s).
 */
var_t *
stats_findvar(var_t *var, char *name)
{
	flowstat_t *iostat;
	flowstat_t *aiostat;
	flowstat_t *glstat;

	filebench_log(LOG_DEBUG_IMPL, "reading stats %s", name);

	/* allocate and zero the global table before taking pointers into it */
	if (globalstats == NULL) {
		globalstats = malloc(FLOW_TYPES * sizeof (flowstat_t));
		(void) memset(globalstats, 0,
		    FLOW_TYPES * sizeof (flowstat_t));
	}

	iostat = &globalstats[FLOW_TYPE_IO];
	aiostat = &globalstats[FLOW_TYPE_AIO];
	glstat = &globalstats[FLOW_TYPE_GLOBAL];

	if (strcmp(name, "iocount") == 0) {
		var->var_integer = iostat->fs_count +
		    aiostat->fs_count;
		filebench_log(LOG_DEBUG_IMPL, "reading stats %s = %lld",
		    name, var->var_integer);
		return (var);
	}

	if (strcmp(name, "iorate") == 0) {
		/* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
		var->var_integer = (iostat->fs_count + aiostat->fs_count) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
		return (var);
	}

	if (strcmp(name, "ioreadrate") == 0) {
		/* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
		var->var_integer = (iostat->fs_rcount + aiostat->fs_rcount) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
		return (var);
	}

	if (strcmp(name, "iowriterate") == 0) {
		/* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
		var->var_integer = (iostat->fs_wcount + aiostat->fs_wcount) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
		return (var);
	}

	if (strcmp(name, "iobandwidth") == 0) {
		/* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
		var->var_integer =
		    ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
		return (var);
	}

	if (strcmp(name, "iolatency") == 0) {
		var->var_integer = iostat->fs_count ?
		    iostat->fs_mstate[FLOW_MSTATE_LAT] /
		    (iostat->fs_count * 1000UL) : 0;
		return (var);
	}

	if (strcmp(name, "iocpu") == 0) {
		var->var_integer = (iostat->fs_count + aiostat->fs_count) ?
		    (iostat->fs_mstate[FLOW_MSTATE_CPU] +
		    aiostat->fs_mstate[FLOW_MSTATE_CPU]) / ((iostat->fs_count +
		    aiostat->fs_count) * 1000UL) : 0;
		return (var);
	}

	if (strcmp(name, "oheadcpu") == 0) {
		var->var_integer = (iostat->fs_count + aiostat->fs_count) ?
		    io_stats_ohead() / ((iostat->fs_count +
		    aiostat->fs_count) * 1000UL) : 0;
		return (var);
	}

	if (strcmp(name, "iowait") == 0) {
		var->var_integer = iostat->fs_count ?
		    iostat->fs_mstate[FLOW_MSTATE_WAIT] /
		    (iostat->fs_count * 1000UL) : 0;
		return (var);
	}

	if (strcmp(name, "syscpu") == 0) {
		/* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
		var->var_integer = glstat->fs_syscpu / 1000.0;
		return (var);
	}

	if (strcmp(name, "iocpusys") == 0) {
		var->var_integer = (iostat->fs_count + aiostat->fs_count) ?
		    iostat->fs_syscpu / ((iostat->fs_count +
		    aiostat->fs_count) * 1000UL) : 0;

		return (var);
	}

	filebench_log(LOG_DEBUG_IMPL,
	    "error reading stats %s", name);

	return (NULL);
}

/*
 * Initializes the static variable "stats_cputime" with the
 * current cpu time, for use by kstats_read_cpu_relative.
 */
void
stats_init(void)
{
#if defined(HAVE_LIBKSTAT) || defined(LINUX_PORT)
	stats_cputime = kstats_read_cpu();
#else
	stats_cputime = 0;
#endif /* HAVE_LIBKSTAT */
}

/*
 * Add a flowstat b to a, leave sum in a.
 */
static void
stats_add(flowstat_t *a, flowstat_t *b)
{
	int i;

	a->fs_count += b->fs_count;
	a->fs_rcount += b->fs_rcount;
	a->fs_wcount += b->fs_wcount;
	a->fs_bytes += b->fs_bytes;
	a->fs_rbytes += b->fs_rbytes;
	a->fs_wbytes += b->fs_wbytes;

	for (i = 0; i < FLOW_MSTATES; i++)
		a->fs_mstate[i] += b->fs_mstate[i];
}

/*
 * Takes a "snapshot" of the global statistics. Actually, it calculates
 * them from the local statistics maintained by each flowop.
 * First the routine pauses filebench, then rolls the statistics for
 * each flowop into its associated FLOW_MASTER flowop.
 * Next all the FLOW_MASTER flowops' statistics are written
 * to the log file followed by the global totals. Then filebench
 * operation is allowed to resume.
 */
void
stats_snap(void)
{
	flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
	flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
	flowstat_t *glstat = &globalstats[FLOW_TYPE_GLOBAL];
	hrtime_t cputime = 0;
	flowop_t *flowop;
	char *str;

	if (globalstats == NULL) {
		filebench_log(LOG_ERROR,
		    "'stats snap' called before 'stats clear'");
		return;
	}

	/* don't print out if run ended in error */
	if (filebench_shm->f_abort == FILEBENCH_ABORT_ERROR) {
		filebench_log(LOG_ERROR,
		    "NO VALID RESULTS! FileBench run terminated prematurely");
		return;
	}

	globalstats->fs_etime = gethrtime();

	filebench_log(LOG_DEBUG_SCRIPT, "Stats period = %llds",
	    (globalstats->fs_etime - globalstats->fs_stime) / 1000000000);

	/* Freeze statistics during update */
	filebench_shm->bequiet = 1;

	flowop = filebench_shm->flowoplist;
	while (flowop) {
		flowop_t *flowop_master;

		if (flowop->fo_instance == FLOW_MASTER) {
			flowop = flowop->fo_next;
			continue;
		}

		flowop_master = flowop_find_one(flowop->fo_name,
		    FLOW_MASTER);

		/* Roll up per-flowop into global stats */
		stats_add(&globalstats[flowop->fo_type],
		    &flowop->fo_stats);
		stats_add(&globalstats[FLOW_TYPE_GLOBAL],
		    &flowop->fo_stats);

		if (flowop_master && IS_FLOW_ACTIVE(flowop)) {
			flowop_master->fo_stats.fs_active++;
		}

		if (flowop_master) {
			/* Roll up per-flow stats into master */
			flowop_master->fo_stats.fs_children++;
			stats_add(&flowop_master->fo_stats, &flowop->fo_stats);
		} else {
			filebench_log(LOG_DEBUG_NEVER,
			    "flowop_stats could not find %s",
			    flowop->fo_name);
		}

		filebench_log(LOG_DEBUG_SCRIPT,
		    "flowop %-20s-%4d - %5d ops, %5.1lf, ops/s %5.1lfmb/s "
		    "%8.3fms/op",
		    flowop->fo_name,
		    flowop->fo_instance,
		    flowop->fo_stats.fs_count,
		    flowop->fo_stats.fs_count /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
		    (flowop->fo_stats.fs_count * 1000000.0) : 0);

		flowop = flowop->fo_next;

	}

#if defined(HAVE_LIBKSTAT) || defined(LINUX_PORT)
	cputime = kstats_read_cpu_relative();
#endif /* HAVE_LIBKSTAT */

	filebench_log(LOG_DEBUG_IMPL,
	    "cputime = %lld, ohead = %lld",
	    cputime / 1000000000,
	    io_stats_ohead() / 1000000000);
	iostat->fs_syscpu =
	    (cputime > io_stats_ohead()) ?
	    (cputime - io_stats_ohead()) : 0;
	glstat->fs_syscpu =
	    (cputime > gl_stats_ohead()) ?
	    (cputime - gl_stats_ohead()) : 0;

	flowop = filebench_shm->flowoplist;
	str = malloc(1048576);
	*str = '\0';
	(void) strcpy(str, "Per-Operation Breakdown\n");
	while (flowop) {
		char line[1024];

		if (flowop->fo_instance != FLOW_MASTER) {
			flowop = flowop->fo_next;
			continue;
		}

		(void) snprintf(line, sizeof (line), "%-20s %8.0lfops/s "
		    "%5.1lfmb/s %8.1fms/op %8.0fus/op-cpu\n",
		    flowop->fo_name,
		    flowop->fo_stats.fs_count /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
		    (flowop->fo_stats.fs_count * 1000000.0) : 0,
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_CPU] /
		    (flowop->fo_stats.fs_count * 1000.0) : 0);
		(void) strcat(str, line);

		flowop = flowop->fo_next;
	}

	filebench_log(LOG_INFO, "%s", str);
	free(str);

	filebench_log(LOG_INFO,
	    "\nIO Summary: %5d ops %5.1lf ops/s, (%0.0lf/%0.0lf r/w) "
	    "%5.1lfmb/s, %6.0fus cpu/op, %5.1fms latency",
	    iostat->fs_count + aiostat->fs_count,
	    (iostat->fs_count + aiostat->fs_count) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_rcount + aiostat->fs_rcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_wcount + aiostat->fs_wcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_rcount + iostat->fs_wcount +
	    aiostat->fs_rcount + aiostat->fs_wcount) ?
	    (iostat->fs_syscpu / 1000.0) /
	    (iostat->fs_rcount + iostat->fs_wcount +
	    aiostat->fs_rcount + aiostat->fs_wcount) : 0,
	    (iostat->fs_rcount + iostat->fs_wcount) ?
	    iostat->fs_mstate[FLOW_MSTATE_LAT] /
	    ((iostat->fs_rcount + iostat->fs_wcount) * 1000000.0) : 0);

	filebench_shm->bequiet = 0;
}

/*
 * Dumps the per-operation statistics and global statistics to the dump file.
 */
void
stats_dump(char *filename)
{
	flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
	flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
	flowop_t *flowop;

	/* don't dump stats if run ended in error */
	if (filebench_shm->f_abort == FILEBENCH_ABORT_ERROR)
		return;

	(void) strcpy(filebench_shm->dump_filename, filename);

	filebench_log(LOG_INFO, "in statsdump %s", filename);

	if (filebench_shm->dump_fd > 0) {
		(void) close(filebench_shm->dump_fd);
		filebench_shm->dump_fd = -1;
	}

	filebench_log(LOG_DUMP, "Flowop totals:");

	flowop = filebench_shm->flowoplist;
	while (flowop) {

		if (flowop->fo_instance != FLOW_MASTER) {
			flowop = flowop->fo_next;
			continue;
		}

		filebench_log(LOG_DUMP,
		    "%-20s %8.0lfops/s %5.1lfmb/s "
		    "%8.1fms/op %8.0fus/op-cpu",
		    flowop->fo_name,
		    flowop->fo_stats.fs_count /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
		    (flowop->fo_stats.fs_count * 1000000.0) : 0,
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_CPU] /
		    (flowop->fo_stats.fs_count * 1000.0) : 0);

		flowop = flowop->fo_next;
	}

	filebench_log(LOG_DUMP, "");
	filebench_log(LOG_DUMP,
	    "IO Summary: %8d ops %8.1lf ops/s, %8.0lf/%0.0lf r/w"
	    "%8.1lfmb/s, %8.0fuscpu/op",

	    iostat->fs_count + aiostat->fs_count,
	    (iostat->fs_count + aiostat->fs_count) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),

	    (iostat->fs_rcount + aiostat->fs_rcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),

	    (iostat->fs_wcount + aiostat->fs_wcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),

	    ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),

	    (iostat->fs_rcount + iostat->fs_wcount +
	    aiostat->fs_rcount + aiostat->fs_wcount) ?
	    (iostat->fs_syscpu / 1000.0) /
	    (iostat->fs_rcount + iostat->fs_wcount +
	    aiostat->fs_rcount + aiostat->fs_wcount) : 0);
}

/*
 * Same as stats_dump, but in xml format.
 */
void
stats_xmldump(char *filename)
{
	flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
	flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
	flowop_t *flowop;

	/* don't dump stats if run ended in error */
	if (filebench_shm->f_abort == FILEBENCH_ABORT_ERROR)
		return;

	(void) strcpy(filebench_shm->dump_filename, filename);

	if (filebench_shm->dump_fd > 0) {
		(void) close(filebench_shm->dump_fd);
		filebench_shm->dump_fd = -1;
	}

	filebench_log(LOG_DUMP, "<stat_doc name=\"Filebench Workload\">");
	filebench_log(LOG_DUMP, "<stat_group name=\"Flowop totals\">");
	filebench_log(LOG_DUMP, "<cell_list>");

	flowop = filebench_shm->flowoplist;
	while (flowop) {
		if (flowop->fo_instance != FLOW_MASTER) {
			flowop = flowop->fo_next;
			continue;
		}

		filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
		    flowop->fo_stats.fs_count /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
		filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
		    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
		filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
		    (flowop->fo_stats.fs_count * 1000000.0) : 0);
		filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_CPU] /
		    (flowop->fo_stats.fs_count * 1000.0) : 0);

		flowop = flowop->fo_next;
	}
	filebench_log(LOG_DUMP, "</cell_list>");

	filebench_log(LOG_DUMP, "<dim_list>");
	filebench_log(LOG_DUMP, "<dim>");
	filebench_log(LOG_DUMP, "<dimval>Operations/sec</dimval>");
	filebench_log(LOG_DUMP, "<dimval>MB/sec</dimval>");
	filebench_log(LOG_DUMP, "<dimval>Latency (ms per operation)</dimval>");
	filebench_log(LOG_DUMP, "<dimval>CPU (us per operation)</dimval>");
	filebench_log(LOG_DUMP, "</dim>");

	filebench_log(LOG_DUMP, "<dim>");
	flowop = filebench_shm->flowoplist;
	while (flowop) {
		if (flowop->fo_instance != FLOW_MASTER) {
			flowop = flowop->fo_next;
			continue;
		}
		filebench_log(LOG_DUMP, "<dimval>%s</dimval>", flowop->fo_name);
		flowop = flowop->fo_next;
	}
	filebench_log(LOG_DUMP, "</dim>");
	filebench_log(LOG_DUMP, "</dim_list>");
	filebench_log(LOG_DUMP, "</stat_group>");

	filebench_log(LOG_DUMP, "<stat_group name=\"IO Summary\">");
	filebench_log(LOG_DUMP, "<cell_list>");
	filebench_log(LOG_DUMP, "<cell>%0d</cell>",
	    iostat->fs_count + aiostat->fs_count);
	filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
	    (iostat->fs_count + aiostat->fs_count) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
	filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
	    (iostat->fs_rcount + aiostat->fs_rcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
	filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
	    (iostat->fs_wcount + aiostat->fs_wcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
	filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
	    ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
	filebench_log(LOG_DUMP, "<cell>%0.0f</cell>",
	    (iostat->fs_rcount + iostat->fs_wcount + aiostat->fs_rcount +
	    aiostat->fs_wcount) ? (iostat->fs_syscpu / 1000.0) /
	    (iostat->fs_rcount + iostat->fs_wcount + aiostat->fs_rcount +
	    aiostat->fs_wcount) : 0);
	filebench_log(LOG_DUMP, "</cell_list>");

	filebench_log(LOG_DUMP, "<dim_list>");
	filebench_log(LOG_DUMP, "<dim>");
	filebench_log(LOG_DUMP, "<dimval>Operations</dimval>");
	filebench_log(LOG_DUMP, "<dimval>Operations/sec</dimval>");
	filebench_log(LOG_DUMP, "<dimval>Reads</dimval>");
	filebench_log(LOG_DUMP, "<dimval>Writes</dimval>");
	filebench_log(LOG_DUMP, "<dimval>MB/sec</dimval>");
	filebench_log(LOG_DUMP, "<dimval>CPU (us per operation)</dimval>");
	filebench_log(LOG_DUMP, "</dim>");

	filebench_log(LOG_DUMP, "<dim>");
	filebench_log(LOG_DUMP, "<dimval>IO Summary</dimval>");
	filebench_log(LOG_DUMP, "</dim>");
	filebench_log(LOG_DUMP, "</dim_list>");
	filebench_log(LOG_DUMP, "</stat_group>");
	filebench_log(LOG_DUMP, "</stat_doc>");
}

/*
 * Clears all the statistics variables (fo_stats) for every defined flowop.
 * It also creates a global flowstat table if one doesn't already exist and
 * clears it.
 */
void
stats_clear(void)
{
	flowop_t *flowop;

#if defined(HAVE_LIBKSTAT) || defined(LINUX_PORT)
	stats_cputime = kstats_read_cpu();
#else
	stats_cputime = 0;
#endif /* HAVE_LIBKSTAT */

	if (globalstats == NULL)
		globalstats = malloc(FLOW_TYPES * sizeof (flowstat_t));

	(void) memset(globalstats, 0, FLOW_TYPES * sizeof (flowstat_t));

	flowop = filebench_shm->flowoplist;

	while (flowop) {
		filebench_log(LOG_DEBUG_IMPL, "Clearing stats for %s-%d",
		    flowop->fo_name,
		    flowop->fo_instance);
		(void) memset(&flowop->fo_stats, 0, sizeof (flowstat_t));
		flowop = flowop->fo_next;
	}

	(void) memset(globalstats, 0, sizeof (flowstat_t));
	globalstats->fs_stime = gethrtime();
}