/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "config.h"

#include <stdio.h>
#include <fcntl.h>
#include <sys/types.h>

#ifdef HAVE_SYSINFO
#include <sys/sysinfo.h>
#endif

#ifdef HAVE_LIBKSTAT
#include <kstat.h>
#include <sys/cpuvar.h>
#endif /* HAVE_LIBKSTAT */

#include <stdarg.h>

#include "filebench.h"
#include "flowop.h"
#include "vars.h"
#include "stats.h"

/*
 * A set of routines for collecting and dumping various filebench
 * run statistics.
 */

/* Global statistics */
static flowstat_t *globalstats = NULL;

static hrtime_t stats_cputime = 0;

#ifdef HAVE_LIBKSTAT
static kstat_ctl_t *kstatp = NULL;
static kstat_t *sysinfo_ksp = NULL;
static kstat_t **cpu_kstat_list = NULL;
static int kstat_ncpus = 0;

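/*
 * Builds the list of per cpu "cpu_stat" kstats. Counts the cpu_stat
 * entries on the kstat chain, allocates an array of kstat pointers of
 * that size, then fills it with the entries that can be read. Returns
 * FILEBENCH_ERROR if the allocation fails or no cpu statistics are
 * found, otherwise FILEBENCH_OK.
 */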
static int
stats_build_kstat_list(void)
{
	kstat_t *ksp;

	kstat_ncpus = 0;
	for (ksp = kstatp->kc_chain; ksp; ksp = ksp->ks_next)
		if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0)
			kstat_ncpus++;

	if ((cpu_kstat_list = (kstat_t **)
	    malloc(kstat_ncpus * sizeof (kstat_t *))) == NULL) {
		filebench_log(LOG_ERROR, "malloc failed");
		return (FILEBENCH_ERROR);
	}

	kstat_ncpus = 0;
	for (ksp = kstatp->kc_chain; ksp; ksp = ksp->ks_next)
		if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0 &&
		    kstat_read(kstatp, ksp, NULL) != -1)
			cpu_kstat_list[kstat_ncpus++] = ksp;

	if (kstat_ncpus == 0) {
		filebench_log(LOG_ERROR,
		    "kstats can't find any cpu statistics");
		return (FILEBENCH_ERROR);
	}

	return (FILEBENCH_OK);
}

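/*
 * Opens the kstat control structure if it is not already open, looks up
 * the "sysinfo" kstat, and builds (or, if the kstat chain has changed,
 * rebuilds) the list of per cpu kstats. Returns FILEBENCH_ERROR on
 * failure, otherwise FILEBENCH_OK.
 */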
static int
stats_kstat_update(void)
{
	if (kstatp == NULL) {
		if ((kstatp = kstat_open()) == (kstat_ctl_t *)NULL) {
			filebench_log(LOG_ERROR, "Cannot read kstats");
			return (FILEBENCH_ERROR);
		}
	}

	/* get the sysinfo kstat */
	if (sysinfo_ksp == NULL)
		sysinfo_ksp = kstat_lookup(kstatp, "unix", 0, "sysinfo");

	/* get per cpu kstats, if necessary */
	if (cpu_kstat_list == NULL) {

		/* Initialize the array of cpu kstat pointers */
		if (stats_build_kstat_list() == FILEBENCH_ERROR)
			return (FILEBENCH_ERROR);

	} else if (kstat_chain_update(kstatp) != 0) {

		/* free up current array of kstat ptrs and get new one */
		free((void *)cpu_kstat_list);
		if (stats_build_kstat_list() == FILEBENCH_ERROR)
			return (FILEBENCH_ERROR);
	}

	return (FILEBENCH_OK);
}

/*
 * Uses the kstat library or, if it is not available, the /proc/stat file
 * to obtain cpu statistics. Collects statistics for each cpu, initializes
 * a local pointer to the sysinfo kstat, and returns the sum of user and
 * kernel time for all the cpus.
 */
static fbint_t
kstats_read_cpu(void)
{
	u_longlong_t cputime_states[CPU_STATES];
	hrtime_t cputime;
	int i;

	/*
	 * Per-CPU statistics
	 */

	if (stats_kstat_update() == FILEBENCH_ERROR)
		return (0);

	/* Sum across all CPUs */
	(void) memset(&cputime_states, 0, sizeof (cputime_states));
	for (i = 0; i < kstat_ncpus; i++) {
		cpu_stat_t cpu_stats;
		int j;

		(void) kstat_read(kstatp, cpu_kstat_list[i],
		    (void *) &cpu_stats);
		for (j = 0; j < CPU_STATES; j++)
			cputime_states[j] += cpu_stats.cpu_sysinfo.cpu[j];
	}

	cputime = cputime_states[CPU_KERNEL] + cputime_states[CPU_USER];

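	/* scale cpu time from clock ticks (assumed 100 per second) to nanoseconds */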
	return (10000000LL * cputime);
}
#else /* HAVE_LIBKSTAT */
#ifdef HAVE_PROC_STAT
static FILE *statfd = NULL;
fbint_t
kstats_read_cpu(void)
{
	/*
	 * Linux provides system wide statistics in /proc/stat
	 * The entry for cpu is
	 * cpu  1636 67 1392 208671 5407 20 12
	 * cpu0 626 8 997 104476 2499 7 7
	 * cpu1 1010 58 395 104195 2907 13 5
	 *
	 * The number of jiffies (1/100ths of a second) that the
	 * system spent in user mode, user mode with low priority
	 * (nice), system mode, and the idle task, respectively.
	 */
	unsigned int user, nice, system;
	char cpu[128]; /* placeholder to read "cpu" */

	if (statfd == NULL) {
		statfd = fopen("/proc/stat", "r");
		if (statfd == NULL) {
			filebench_log(LOG_ERROR, "Cannot open /proc/stat");
			return (-1);
		}
	}

	/* re-read from the start so repeated calls see fresh counters */
	rewind(statfd);

	if (fscanf(statfd, "%127s %u %u %u", cpu, &user, &nice, &system) != 4) {
		filebench_log(LOG_ERROR, "Cannot read /proc/stat");
		return (-1);
	}

	/* convert jiffies (1/100th of a second each) to nanoseconds */
	return ((fbint_t)(user + nice + system) * 10000000);
}

#else /* HAVE_PROC_STAT */
fbint_t
kstats_read_cpu(void)
{
	return (0);
}
#endif
#endif /* HAVE_LIBKSTAT */

/*
 * Returns the net cpu time used since the beginning of the run.
 * Just calls kstats_read_cpu() and subtracts stats_cputime, which
 * is set at the beginning of the filebench run.
 */
static hrtime_t
kstats_read_cpu_relative(void)
{
	hrtime_t cputime;

	cputime = kstats_read_cpu();
	return (cputime - stats_cputime);
}

/*
 * IO Overhead CPU is the amount of CPU that is incurred running
 * the benchmark infrastructure.
 *
 * It is computed as the sum of micro-state cpu time for each
 * thread around the op being tested.
 *
 * Overhead time is computed for each flow.
 *
 * System overhead is computed as the overhead for I/O flows
 * plus all other time spent running non-io related flowops.
 */

/*
 * Computes and returns the overhead CPU time attributable to
 * IO type flowops.
 */
static hrtime_t
io_stats_ohead(void)
{
	flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
	flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
	flowstat_t *glstat = &globalstats[FLOW_TYPE_GLOBAL];

	filebench_log(LOG_DEBUG_NEVER,
	    "Computing overhead as %llu + %llu - %llu - %llu",
	    (u_longlong_t)glstat->fs_mstate[FLOW_MSTATE_OHEAD],
	    (u_longlong_t)glstat->fs_mstate[FLOW_MSTATE_CPU],
	    (u_longlong_t)iostat->fs_mstate[FLOW_MSTATE_CPU],
	    (u_longlong_t)aiostat->fs_mstate[FLOW_MSTATE_CPU]);

	return ((glstat->fs_mstate[FLOW_MSTATE_OHEAD] +
	    glstat->fs_mstate[FLOW_MSTATE_CPU] -
	    iostat->fs_mstate[FLOW_MSTATE_CPU] -
	    aiostat->fs_mstate[FLOW_MSTATE_CPU]));
}

/*
 * Returns the total overhead CPU time.
 */
static hrtime_t
gl_stats_ohead(void)
{
	flowstat_t *glstat = &globalstats[FLOW_TYPE_GLOBAL];

	return (glstat->fs_mstate[FLOW_MSTATE_OHEAD]);
}

/*
 * Places the value represented by "name" into the var_val.integer field of the
 * supplied var_t. Compares the supplied "name" with a set of predefined
 * names and calculates the value from the appropriate globalstats field(s).
 */
var_t *
stats_findvar(var_t *var, char *name)
{
	flowstat_t *iostat;
	flowstat_t *aiostat;
	flowstat_t *glstat;

	filebench_log(LOG_DEBUG_IMPL, "reading stats %s", name);

	if (globalstats == NULL) {
		/* normally allocated by stats_clear(); start from zero here */
		globalstats = malloc(FLOW_TYPES * sizeof (flowstat_t));
		(void) memset(globalstats, 0, FLOW_TYPES * sizeof (flowstat_t));
	}

	iostat = &globalstats[FLOW_TYPE_IO];
	aiostat = &globalstats[FLOW_TYPE_AIO];
	glstat = &globalstats[FLOW_TYPE_GLOBAL];

	if (strcmp(name, "iocount") == 0) {
		fbint_t stat;

		stat = iostat->fs_count + aiostat->fs_count;
		VAR_SET_INT(var, stat);
		filebench_log(LOG_DEBUG_IMPL, "reading stats %s = %llu",
		    name, (u_longlong_t)stat);
		return (var);
	}

	if (strcmp(name, "iorate") == 0) {
		fbint_t stat;

		/* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
		stat = (iostat->fs_count + aiostat->fs_count) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
		VAR_SET_INT(var, stat);
		return (var);
	}

	if (strcmp(name, "ioreadrate") == 0) {
		fbint_t stat;

		/* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
		stat = (iostat->fs_rcount + aiostat->fs_rcount) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
		VAR_SET_INT(var, stat);
		return (var);
	}

	if (strcmp(name, "iowriterate") == 0) {
		fbint_t stat;

		/* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
		stat = (iostat->fs_wcount + aiostat->fs_wcount) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
		VAR_SET_INT(var, stat);
		return (var);
	}

	if (strcmp(name, "iobandwidth") == 0) {
		fbint_t stat;

		/* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
		stat =
		    ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS);
		VAR_SET_INT(var, stat);
		return (var);
	}

	if (strcmp(name, "iolatency") == 0) {
		fbint_t stat;

		stat = iostat->fs_count ? iostat->fs_mstate[FLOW_MSTATE_LAT] /
		    (iostat->fs_count * 1000UL) : 0;
		VAR_SET_INT(var, stat);
		return (var);
	}

	if (strcmp(name, "iocpu") == 0) {
		fbint_t stat;

		stat = (iostat->fs_count + aiostat->fs_count) ?
		    (iostat->fs_mstate[FLOW_MSTATE_CPU] +
		    aiostat->fs_mstate[FLOW_MSTATE_CPU]) / ((iostat->fs_count +
		    aiostat->fs_count) * 1000UL) : 0;
		VAR_SET_INT(var, stat);
		return (var);
	}

	if (strcmp(name, "oheadcpu") == 0) {
		fbint_t stat;

		stat = (iostat->fs_count + aiostat->fs_count) ?
		    io_stats_ohead() / ((iostat->fs_count +
		    aiostat->fs_count) * 1000UL) : 0;
		VAR_SET_INT(var, stat);
		return (var);
	}

	if (strcmp(name, "iowait") == 0) {
		fbint_t stat;

		stat = iostat->fs_count ?
		    iostat->fs_mstate[FLOW_MSTATE_WAIT] /
		    (iostat->fs_count * 1000UL) : 0;
		VAR_SET_INT(var, stat);
		return (var);
	}

	if (strcmp(name, "syscpu") == 0) {
		fbint_t stat;

		/* LINTED E_ASSIGMENT_CAUSE_LOSS_PREC */
		stat = glstat->fs_syscpu / 1000.0;
		VAR_SET_INT(var, stat);
		return (var);
	}

	if (strcmp(name, "iocpusys") == 0) {
		fbint_t stat;

		stat = (iostat->fs_count + aiostat->fs_count) ?
		    iostat->fs_syscpu / ((iostat->fs_count +
		    aiostat->fs_count) * 1000UL) : 0;

		VAR_SET_INT(var, stat);
		return (var);
	}

	filebench_log(LOG_DEBUG_IMPL,
	    "error reading stats %s", name);

	return (NULL);
}

/*
 * Initializes the static variable "stats_cputime" with the
 * current cpu time, for use by kstats_read_cpu_relative.
 */
void
stats_init(void)
{
#if defined(HAVE_LIBKSTAT) || defined(LINUX_PORT)
	stats_cputime = kstats_read_cpu();
#else
	stats_cputime = 0;
#endif /* HAVE_LIBKSTAT */
}

/*
 * Add a flowstat b to a, leave sum in a.
 */
static void
stats_add(flowstat_t *a, flowstat_t *b)
{
	int i;

	a->fs_count += b->fs_count;
	a->fs_rcount += b->fs_rcount;
	a->fs_wcount += b->fs_wcount;
	a->fs_bytes += b->fs_bytes;
	a->fs_rbytes += b->fs_rbytes;
	a->fs_wbytes += b->fs_wbytes;

	for (i = 0; i < FLOW_MSTATES; i++)
		a->fs_mstate[i] += b->fs_mstate[i];
}

/*
 * Takes a "snapshot" of the global statistics. Actually, it calculates
 * them from the local statistics maintained by each flowop.
 * First the routine pauses filebench, then rolls the statistics for
 * each flowop into its associated FLOW_MASTER flowop.
 * Next all the FLOW_MASTER flowops' statistics are written
 * to the log file followed by the global totals. Then filebench
 * operation is allowed to resume.
 */
void
stats_snap(void)
{
	flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
	flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
	flowstat_t *glstat = &globalstats[FLOW_TYPE_GLOBAL];
	hrtime_t cputime;
	flowop_t *flowop;
	char *str;

	if (globalstats == NULL) {
		filebench_log(LOG_ERROR,
		    "'stats snap' called before 'stats clear'");
		return;
	}

	/* don't print out if run ended in error */
	if (filebench_shm->shm_f_abort == FILEBENCH_ABORT_ERROR) {
		filebench_log(LOG_ERROR,
		    "NO VALID RESULTS! FileBench run terminated prematurely");
		return;
	}

	globalstats->fs_etime = gethrtime();

	filebench_log(LOG_DEBUG_SCRIPT, "Stats period = %llus",
	    (u_longlong_t)((globalstats->fs_etime - globalstats->fs_stime) /
	    1000000000));

	/* Freeze statistics during update */
	filebench_shm->shm_bequiet = 1;

	flowop = filebench_shm->shm_flowoplist;
	while (flowop) {
		flowop_t *flowop_master;

		if (flowop->fo_instance <= FLOW_DEFINITION) {
			flowop = flowop->fo_next;
			continue;
		}

		flowop_master = flowop_find_one(flowop->fo_name, FLOW_MASTER);

		/* Roll up per-flowop into global stats */
		stats_add(&globalstats[flowop->fo_type],
		    &flowop->fo_stats);
		stats_add(&globalstats[FLOW_TYPE_GLOBAL],
		    &flowop->fo_stats);

		if (flowop_master && IS_FLOW_ACTIVE(flowop)) {
			flowop_master->fo_stats.fs_active++;
		}

		if (flowop_master) {
			/* Roll up per-flow stats into master */
			flowop_master->fo_stats.fs_children++;
			stats_add(&flowop_master->fo_stats, &flowop->fo_stats);
		} else {
			filebench_log(LOG_DEBUG_NEVER,
			    "flowop_stats could not find %s",
			    flowop->fo_name);
		}

		filebench_log(LOG_DEBUG_SCRIPT,
		    "flowop %-20s-%4d - %5llu ops, %5.1lf ops/s, %5.1lfmb/s "
		    "%8.3fms/op",
		    flowop->fo_name,
		    flowop->fo_instance,
		    (u_longlong_t)flowop->fo_stats.fs_count,
		    flowop->fo_stats.fs_count /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
		    (flowop->fo_stats.fs_count * 1000000.0) : 0);

		flowop = flowop->fo_next;
	}

#if defined(HAVE_LIBKSTAT) || defined(LINUX_PORT)
	cputime = kstats_read_cpu_relative();
#else
	cputime = 0;
#endif /* HAVE_LIBKSTAT */

	filebench_log(LOG_DEBUG_IMPL,
	    "cputime = %llu, ohead = %llu",
	    (u_longlong_t)(cputime / 1000000000),
	    (u_longlong_t)(io_stats_ohead() / 1000000000));
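	/*
	 * Attribute system CPU time: total measured cpu time minus the
	 * benchmark overhead for the I/O flows and for the run as a
	 * whole, clamped at zero.
	 */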
	iostat->fs_syscpu =
	    (cputime > io_stats_ohead()) ?
	    (cputime - io_stats_ohead()) : 0;
	glstat->fs_syscpu =
	    (cputime > gl_stats_ohead()) ?
	    (cputime - gl_stats_ohead()) : 0;

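	/* Build the human readable per-operation breakdown report */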
	flowop = filebench_shm->shm_flowoplist;
	str = malloc(1048576);
	*str = '\0';
	(void) strcpy(str, "Per-Operation Breakdown\n");
	while (flowop) {
		char line[1024];

		if (flowop->fo_instance != FLOW_MASTER) {
			flowop = flowop->fo_next;
			continue;
		}

		(void) snprintf(line, sizeof (line), "%-20s %8.0lfops/s "
		    "%5.1lfmb/s %8.1fms/op %8.0fus/op-cpu\n",
		    flowop->fo_name,
		    flowop->fo_stats.fs_count /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
		    (flowop->fo_stats.fs_count * 1000000.0) : 0,
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_CPU] /
		    (flowop->fo_stats.fs_count * 1000.0) : 0);
		(void) strcat(str, line);

		flowop = flowop->fo_next;
	}

	filebench_log(LOG_INFO, "%s", str);
	free(str);

	filebench_log(LOG_INFO,
	    "\nIO Summary: %5llu ops, %5.1lf ops/s, (%0.0lf/%0.0lf r/w) "
	    "%5.1lfmb/s, %6.0fus cpu/op, %5.1fms latency",
	    (u_longlong_t)(iostat->fs_count + aiostat->fs_count),
	    (iostat->fs_count + aiostat->fs_count) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_rcount + aiostat->fs_rcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_wcount + aiostat->fs_wcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_rcount + iostat->fs_wcount +
	    aiostat->fs_rcount + aiostat->fs_wcount) ?
	    (iostat->fs_syscpu / 1000.0) /
	    (iostat->fs_rcount + iostat->fs_wcount +
	    aiostat->fs_rcount + aiostat->fs_wcount) : 0,
	    (iostat->fs_rcount + iostat->fs_wcount) ?
	    iostat->fs_mstate[FLOW_MSTATE_LAT] /
	    ((iostat->fs_rcount + iostat->fs_wcount) * 1000000.0) : 0);

	filebench_shm->shm_bequiet = 0;
}

/*
 * Dumps the per-operation statistics and global statistics to the dump file.
 */
void
stats_dump(char *filename)
{
	flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
	flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
	flowop_t *flowop;

	/* don't dump stats if run ended in error */
	if (filebench_shm->shm_f_abort == FILEBENCH_ABORT_ERROR)
		return;

	(void) strcpy(filebench_shm->shm_dump_filename, filename);

	filebench_log(LOG_INFO, "in statsdump %s", filename);

	if (filebench_shm->shm_dump_fd > 0) {
		(void) close(filebench_shm->shm_dump_fd);
		filebench_shm->shm_dump_fd = -1;
	}

	filebench_log(LOG_DUMP, "Flowop totals:");

	flowop = filebench_shm->shm_flowoplist;
	while (flowop) {

		if (flowop->fo_instance != FLOW_MASTER) {
			flowop = flowop->fo_next;
			continue;
		}

		filebench_log(LOG_DUMP,
		    "%-20s %8.0lfops/s %5.1lfmb/s "
		    "%8.1fms/op %8.0fus/op-cpu",
		    flowop->fo_name,
		    flowop->fo_stats.fs_count /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
		    (flowop->fo_stats.fs_count * 1000000.0) : 0,
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_CPU] /
		    (flowop->fo_stats.fs_count * 1000.0) : 0);

		flowop = flowop->fo_next;
	}

	filebench_log(LOG_DUMP, "");
	filebench_log(LOG_DUMP,
	    "IO Summary: %8llu ops %8.1lf ops/s, %8.0lf/%0.0lf r/w "
	    "%8.1lfmb/s, %8.0fus cpu/op",
	    (u_longlong_t)(iostat->fs_count + aiostat->fs_count),
	    (iostat->fs_count + aiostat->fs_count) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_rcount + aiostat->fs_rcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_wcount + aiostat->fs_wcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_rcount + iostat->fs_wcount +
	    aiostat->fs_rcount + aiostat->fs_wcount) ?
	    (iostat->fs_syscpu / 1000.0) /
	    (iostat->fs_rcount + iostat->fs_wcount +
	    aiostat->fs_rcount + aiostat->fs_wcount) : 0);
}

/*
 * Same as stats_dump, but in xml format.
 */
void
stats_xmldump(char *filename)
{
	flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
	flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
	flowop_t *flowop;

	/* don't dump stats if run ended in error */
	if (filebench_shm->shm_f_abort == FILEBENCH_ABORT_ERROR)
		return;

	(void) strcpy(filebench_shm->shm_dump_filename, filename);

	if (filebench_shm->shm_dump_fd > 0) {
		(void) close(filebench_shm->shm_dump_fd);
		filebench_shm->shm_dump_fd = -1;
	}

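	/* start the XML document and emit the per-flowop totals stat_group */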
	filebench_log(LOG_DUMP, "<stat_doc name=\"Filebench Workload\">");
	filebench_log(LOG_DUMP, "<stat_group name=\"Flowop totals\">");
	filebench_log(LOG_DUMP, "<cell_list>");

	flowop = filebench_shm->shm_flowoplist;
	while (flowop) {
		if (flowop->fo_instance != FLOW_MASTER) {
			flowop = flowop->fo_next;
			continue;
		}

		filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
		    flowop->fo_stats.fs_count /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
		filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
		    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
		filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
		    (flowop->fo_stats.fs_count * 1000000.0) : 0);
		filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_CPU] /
		    (flowop->fo_stats.fs_count * 1000.0) : 0);

		flowop = flowop->fo_next;
	}
	filebench_log(LOG_DUMP, "</cell_list>");

	filebench_log(LOG_DUMP, "<dim_list>");
	filebench_log(LOG_DUMP, "<dim>");
	filebench_log(LOG_DUMP, "<dimval>Operations/sec</dimval>");
	filebench_log(LOG_DUMP, "<dimval>MB/sec</dimval>");
	filebench_log(LOG_DUMP, "<dimval>Latency (ms per operation)</dimval>");
	filebench_log(LOG_DUMP, "<dimval>CPU (us per operation)</dimval>");
	filebench_log(LOG_DUMP, "</dim>");

	filebench_log(LOG_DUMP, "<dim>");
	flowop = filebench_shm->shm_flowoplist;
	while (flowop) {
		if (flowop->fo_instance != FLOW_MASTER) {
			flowop = flowop->fo_next;
			continue;
		}
		filebench_log(LOG_DUMP, "<dimval>%s</dimval>", flowop->fo_name);
		flowop = flowop->fo_next;
	}
	filebench_log(LOG_DUMP, "</dim>");
	filebench_log(LOG_DUMP, "</dim_list>");
	filebench_log(LOG_DUMP, "</stat_group>");

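	/* emit the overall IO summary as a second stat_group */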
	filebench_log(LOG_DUMP, "<stat_group name=\"IO Summary\">");
	filebench_log(LOG_DUMP, "<cell_list>");
	filebench_log(LOG_DUMP, "<cell>%llu</cell>",
	    (u_longlong_t)(iostat->fs_count + aiostat->fs_count));
	filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
	    (iostat->fs_count + aiostat->fs_count) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
	filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
	    (iostat->fs_rcount + aiostat->fs_rcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
	filebench_log(LOG_DUMP, "<cell>%0.0lf</cell>",
	    (iostat->fs_wcount + aiostat->fs_wcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
	filebench_log(LOG_DUMP, "<cell>%0.1lf</cell>",
	    ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS));
	filebench_log(LOG_DUMP, "<cell>%0.0f</cell>",
	    (iostat->fs_rcount + iostat->fs_wcount + aiostat->fs_rcount +
	    aiostat->fs_wcount) ? (iostat->fs_syscpu / 1000.0) /
	    (iostat->fs_rcount + iostat->fs_wcount + aiostat->fs_rcount +
	    aiostat->fs_wcount) : 0);
	filebench_log(LOG_DUMP, "</cell_list>");

	filebench_log(LOG_DUMP, "<dim_list>");
	filebench_log(LOG_DUMP, "<dim>");
	filebench_log(LOG_DUMP, "<dimval>Operations</dimval>");
	filebench_log(LOG_DUMP, "<dimval>Operations/sec</dimval>");
	filebench_log(LOG_DUMP, "<dimval>Reads</dimval>");
	filebench_log(LOG_DUMP, "<dimval>Writes</dimval>");
	filebench_log(LOG_DUMP, "<dimval>MB/sec</dimval>");
	filebench_log(LOG_DUMP, "<dimval>CPU (us per operation)</dimval>");
	filebench_log(LOG_DUMP, "</dim>");

	filebench_log(LOG_DUMP, "<dim>");
	filebench_log(LOG_DUMP, "<dimval>IO Summary</dimval>");
	filebench_log(LOG_DUMP, "</dim>");
	filebench_log(LOG_DUMP, "</dim_list>");
	filebench_log(LOG_DUMP, "</stat_group>");
	filebench_log(LOG_DUMP, "</stat_doc>");
}

/*
 * Same as stats_dump, but in computer friendly format.
 */
void
stats_multidump(char *filename)
{
	flowstat_t *iostat = &globalstats[FLOW_TYPE_IO];
	flowstat_t *aiostat = &globalstats[FLOW_TYPE_AIO];
	flowop_t *flowop;

	/* don't dump stats if run ended in error */
	if (filebench_shm->shm_f_abort == FILEBENCH_ABORT_ERROR)
		return;

	(void) strcpy(filebench_shm->shm_dump_filename, filename);

	filebench_log(LOG_INFO, "in statsmultidump %s", filename);

	if (filebench_shm->shm_dump_fd > 0) {
		(void) close(filebench_shm->shm_dump_fd);
		filebench_shm->shm_dump_fd = -1;
	}

	filebench_log(LOG_DUMP, "Flowop totals:");

	flowop = filebench_shm->shm_flowoplist;
	while (flowop) {

		if (flowop->fo_instance != FLOW_MASTER) {
			flowop = flowop->fo_next;
			continue;
		}

		filebench_log(LOG_DUMP,
		    "%s\t%1.0lf\t%1.1lf\t%1.1f\t%1.0f",
		    flowop->fo_name,
		    flowop->fo_stats.fs_count /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    (flowop->fo_stats.fs_bytes / (1024 * 1024)) /
		    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] /
		    (flowop->fo_stats.fs_count * 1000000.0) : 0,
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_mstate[FLOW_MSTATE_CPU] /
		    (flowop->fo_stats.fs_count * 1000.0) : 0);

		flowop = flowop->fo_next;
	}

	filebench_log(LOG_DUMP, "");
	filebench_log(LOG_DUMP,
	    "IO Summary:\n%llu\t%1.1lf\t%1.0lf\t%1.0lf\t%1.1lf\t%1.0f\t%1.1f\n",
	    (u_longlong_t)(iostat->fs_count + aiostat->fs_count),
	    (iostat->fs_count + aiostat->fs_count) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_rcount + aiostat->fs_rcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_wcount + aiostat->fs_wcount) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    ((iostat->fs_bytes + aiostat->fs_bytes) / (1024 * 1024)) /
	    ((globalstats->fs_etime - globalstats->fs_stime) / FSECS),
	    (iostat->fs_rcount + iostat->fs_wcount +
	    aiostat->fs_rcount + aiostat->fs_wcount) ?
	    (iostat->fs_syscpu / 1000.0) /
	    (iostat->fs_rcount + iostat->fs_wcount +
	    aiostat->fs_rcount + aiostat->fs_wcount) : 0,
	    (iostat->fs_rcount + iostat->fs_wcount) ?
	    iostat->fs_mstate[FLOW_MSTATE_LAT] /
	    ((iostat->fs_rcount + iostat->fs_wcount) * 1000000.0) : 0);
}

/*
 * Clears all the statistics variables (fo_stats) for every defined flowop.
 * It also creates a global flowstat table if one doesn't already exist and
 * clears it.
 */
void
stats_clear(void)
{
	flowop_t *flowop;

#if defined(HAVE_LIBKSTAT) || defined(LINUX_PORT)
	stats_cputime = kstats_read_cpu();
#else
	stats_cputime = 0;
#endif /* HAVE_LIBKSTAT */

	if (globalstats == NULL)
		globalstats = malloc(FLOW_TYPES * sizeof (flowstat_t));

	(void) memset(globalstats, 0, FLOW_TYPES * sizeof (flowstat_t));

	flowop = filebench_shm->shm_flowoplist;

	while (flowop) {
		filebench_log(LOG_DEBUG_IMPL, "Clearing stats for %s-%d",
		    flowop->fo_name,
		    flowop->fo_instance);
		(void) memset(&flowop->fo_stats, 0, sizeof (flowstat_t));
		flowop = flowop->fo_next;
	}

	(void) memset(globalstats, 0, sizeof (flowstat_t));
	globalstats->fs_stime = gethrtime();
}