/**
 * D header file for perf_event_open system call.
 *
 * Converted from linux userspace header, comments included.
 *
 * Authors: Max Haughton
 */
module core.sys.linux.perf_event;
version (linux) : extern (C):
@nogc:
nothrow:
@system:

import core.sys.posix.sys.ioctl;
import core.sys.posix.unistd;

version (HPPA) version = HPPA_Any;
version (HPPA64) version = HPPA_Any;
version (PPC) version = PPC_Any;
version (PPC64) version = PPC_Any;
version (RISCV32) version = RISCV_Any;
version (RISCV64) version = RISCV_Any;
version (S390) version = IBMZ_Any;
version (SPARC) version = SPARC_Any;
version (SPARC64) version = SPARC_Any;
version (SystemZ) version = IBMZ_Any;

version (X86_64)
{
    version (D_X32)
        enum __NR_perf_event_open = 0x40000000 + 298;
    else
        enum __NR_perf_event_open = 298;
}
else version (X86)
{
    enum __NR_perf_event_open = 336;
}
else version (ARM)
{
    enum __NR_perf_event_open = 364;
}
else version (AArch64)
{
    enum __NR_perf_event_open = 241;
}
else version (HPPA_Any)
{
    enum __NR_perf_event_open = 318;
}
else version (IBMZ_Any)
{
    enum __NR_perf_event_open = 331;
}
else version (MIPS32)
{
    enum __NR_perf_event_open = 4333;
}
else version (MIPS64)
{
    version (MIPS_N32)
        enum __NR_perf_event_open = 6296;
    else version (MIPS_N64)
        enum __NR_perf_event_open = 5292;
    else
        static assert(0, "Architecture not supported");
}
else version (PPC_Any)
{
    enum __NR_perf_event_open = 319;
}
else version (RISCV_Any)
{
    enum __NR_perf_event_open = 241;
}
else version (SPARC_Any)
{
    enum __NR_perf_event_open = 327;
}
else
{
    static assert(0, "Architecture not supported");
}
extern (C) extern long syscall(long __sysno, ...);
static long perf_event_open(perf_event_attr* hw_event, pid_t pid, int cpu, int group_fd, ulong flags)
{
    return syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
}
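/*
 * Example (illustrative sketch, not part of the converted header): opening a
 * hardware instruction counter for the calling process with the wrapper
 * above. The flag choices and error handling are assumptions for the sake of
 * the example.
 * ---
 * perf_event_attr attr;
 * attr.type = perf_type_id.PERF_TYPE_HARDWARE;
 * attr.size = perf_event_attr.sizeof;
 * attr.config = perf_hw_id.PERF_COUNT_HW_INSTRUCTIONS;
 * attr.disabled = 1;       // start disabled; enable later via ioctl
 * attr.exclude_kernel = 1; // count user-space instructions only
 * attr.exclude_hv = 1;
 *
 * // pid = 0, cpu = -1: measure the calling process on any CPU
 * int fd = cast(int) perf_event_open(&attr, 0, -1, -1, 0);
 * if (fd < 0)
 *     perror("perf_event_open"); // import core.stdc.stdio : perror;
 * ---
 */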
/*
 * User-space ABI bits:
 */

/**
 * attr.type
 */
enum perf_type_id
{
    PERF_TYPE_HARDWARE = 0,
    PERF_TYPE_SOFTWARE = 1,
    PERF_TYPE_TRACEPOINT = 2,
    PERF_TYPE_HW_CACHE = 3,
    PERF_TYPE_RAW = 4,
    PERF_TYPE_BREAKPOINT = 5,

    PERF_TYPE_MAX = 6 /* non-ABI */
}
/**
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id
{
    ///
    PERF_COUNT_HW_CPU_CYCLES = 0,
    ///
    PERF_COUNT_HW_INSTRUCTIONS = 1,
    ///
    PERF_COUNT_HW_CACHE_REFERENCES = 2,
    ///
    PERF_COUNT_HW_CACHE_MISSES = 3,
    ///
    PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
    ///
    PERF_COUNT_HW_BRANCH_MISSES = 5,
    ///
    PERF_COUNT_HW_BUS_CYCLES = 6,
    ///
    PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
    ///
    PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
    ///
    PERF_COUNT_HW_REF_CPU_CYCLES = 9,
    ///
    PERF_COUNT_HW_MAX = 10 /* non-ABI */
}

/**
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id
{
    ///
    PERF_COUNT_HW_CACHE_L1D = 0,
    ///
    PERF_COUNT_HW_CACHE_L1I = 1,
    ///
    PERF_COUNT_HW_CACHE_LL = 2,
    ///
    PERF_COUNT_HW_CACHE_DTLB = 3,
    ///
    PERF_COUNT_HW_CACHE_ITLB = 4,
    ///
    PERF_COUNT_HW_CACHE_BPU = 5,
    ///
    PERF_COUNT_HW_CACHE_NODE = 6,
    ///
    PERF_COUNT_HW_CACHE_MAX = 7 /* non-ABI */
}
///
enum perf_hw_cache_op_id
{
    ///
    PERF_COUNT_HW_CACHE_OP_READ = 0,
    ///
    PERF_COUNT_HW_CACHE_OP_WRITE = 1,
    ///
    PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
    ///
    PERF_COUNT_HW_CACHE_OP_MAX = 3 /* non-ABI */
}
///
enum perf_hw_cache_op_result_id
{
    ///
    PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
    ///
    PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
    ///
    PERF_COUNT_HW_CACHE_RESULT_MAX = 2 /* non-ABI */
}
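/*
 * Example (illustrative sketch): for PERF_TYPE_HW_CACHE events the three
 * enums above are combined into attr.config as
 * (perf_hw_cache_id) | (perf_hw_cache_op_id << 8) | (perf_hw_cache_op_result_id << 16)
 * (see perf_event_open(2)). Counting L1 data-cache read misses would look like:
 * ---
 * perf_event_attr attr;
 * attr.type = perf_type_id.PERF_TYPE_HW_CACHE;
 * attr.size = perf_event_attr.sizeof;
 * attr.config = perf_hw_cache_id.PERF_COUNT_HW_CACHE_L1D
 *     | (perf_hw_cache_op_id.PERF_COUNT_HW_CACHE_OP_READ << 8)
 *     | (perf_hw_cache_op_result_id.PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 * ---
 */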

/**
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids
{
    ///
    PERF_COUNT_SW_CPU_CLOCK = 0,
    ///
    PERF_COUNT_SW_TASK_CLOCK = 1,
    ///
    PERF_COUNT_SW_PAGE_FAULTS = 2,
    ///
    PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
    ///
    PERF_COUNT_SW_CPU_MIGRATIONS = 4,
    ///
    PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
    ///
    PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
    ///
    PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
    ///
    PERF_COUNT_SW_EMULATION_FAULTS = 8,
    ///
    PERF_COUNT_SW_DUMMY = 9,
    ///
    PERF_COUNT_SW_BPF_OUTPUT = 10,
    ///
    PERF_COUNT_SW_MAX = 11 /* non-ABI */
}

/**
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format
{
    ///
    PERF_SAMPLE_IP = 1U << 0,
    ///
    PERF_SAMPLE_TID = 1U << 1,
    ///
    PERF_SAMPLE_TIME = 1U << 2,
    ///
    PERF_SAMPLE_ADDR = 1U << 3,
    ///
    PERF_SAMPLE_READ = 1U << 4,
    ///
    PERF_SAMPLE_CALLCHAIN = 1U << 5,
    ///
    PERF_SAMPLE_ID = 1U << 6,
    ///
    PERF_SAMPLE_CPU = 1U << 7,
    ///
    PERF_SAMPLE_PERIOD = 1U << 8,
    ///
    PERF_SAMPLE_STREAM_ID = 1U << 9,
    ///
    PERF_SAMPLE_RAW = 1U << 10,
    ///
    PERF_SAMPLE_BRANCH_STACK = 1U << 11,
    ///
    PERF_SAMPLE_REGS_USER = 1U << 12,
    ///
    PERF_SAMPLE_STACK_USER = 1U << 13,
    ///
    PERF_SAMPLE_WEIGHT = 1U << 14,
    ///
    PERF_SAMPLE_DATA_SRC = 1U << 15,
    ///
    PERF_SAMPLE_IDENTIFIER = 1U << 16,
    ///
    PERF_SAMPLE_TRANSACTION = 1U << 17,
    ///
    PERF_SAMPLE_REGS_INTR = 1U << 18,
    ///
    PERF_SAMPLE_PHYS_ADDR = 1U << 19,
    ///
    PERF_SAMPLE_MAX = 1U << 20 /* non-ABI */
}

/**
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift
{
    PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /** user branches */
    PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /** kernel branches */
    PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /** hypervisor branches */

    PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /** any branch types */
    PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /** any call branch */
    PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /** any return branch */
    PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /** indirect calls */
    PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /** transaction aborts */
    PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /** in transaction */
    PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /** not in transaction */
    PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /** conditional branches */

    PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /** call/ret stack */
    PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /** indirect jumps */
    PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /** direct call */

    PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /** no flags */
    PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /** no cycles */

    PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /** save branch type */

    PERF_SAMPLE_BRANCH_MAX_SHIFT = 17 /** non-ABI */
}
///
enum perf_branch_sample_type
{
    PERF_SAMPLE_BRANCH_USER = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_USER_SHIFT,
    PERF_SAMPLE_BRANCH_KERNEL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
    PERF_SAMPLE_BRANCH_HV = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_HV_SHIFT,
    PERF_SAMPLE_BRANCH_ANY = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_SHIFT,
    PERF_SAMPLE_BRANCH_ANY_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
    PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
    PERF_SAMPLE_BRANCH_IND_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
    PERF_SAMPLE_BRANCH_ABORT_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
    PERF_SAMPLE_BRANCH_IN_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
    PERF_SAMPLE_BRANCH_NO_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
    PERF_SAMPLE_BRANCH_COND = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_COND_SHIFT,
    PERF_SAMPLE_BRANCH_CALL_STACK = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
    PERF_SAMPLE_BRANCH_IND_JUMP = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
    PERF_SAMPLE_BRANCH_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_CALL_SHIFT,
    PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
    PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
    PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
    PERF_SAMPLE_BRANCH_MAX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_MAX_SHIFT
}

/**
 * Common flow change classification
 */
enum
{
    PERF_BR_UNKNOWN = 0, /** unknown */
    PERF_BR_COND = 1, /** conditional */
    PERF_BR_UNCOND = 2, /** unconditional */
    PERF_BR_IND = 3, /** indirect */
    PERF_BR_CALL = 4, /** function call */
    PERF_BR_IND_CALL = 5, /** indirect function call */
    PERF_BR_RET = 6, /** function return */
    PERF_BR_SYSCALL = 7, /** syscall */
    PERF_BR_SYSRET = 8, /** syscall return */
    PERF_BR_COND_CALL = 9, /** conditional function call */
    PERF_BR_COND_RET = 10, /** conditional function return */
    PERF_BR_MAX = 11
}

///
enum PERF_SAMPLE_BRANCH_PLM_ALL = perf_branch_sample_type.PERF_SAMPLE_BRANCH_USER
    | perf_branch_sample_type.PERF_SAMPLE_BRANCH_KERNEL
    | perf_branch_sample_type.PERF_SAMPLE_BRANCH_HV;

/**
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi
{
    ///
    PERF_SAMPLE_REGS_ABI_NONE = 0,
    ///
    PERF_SAMPLE_REGS_ABI_32 = 1,
    ///
    PERF_SAMPLE_REGS_ABI_64 = 2
}

/**
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum
{
    PERF_TXN_ELISION = 1 << 0, /** From elision */
    PERF_TXN_TRANSACTION = 1 << 1, /** From transaction */
    PERF_TXN_SYNC = 1 << 2, /** Instruction is related */
    PERF_TXN_ASYNC = 1 << 3, /** Instruction not related */
    PERF_TXN_RETRY = 1 << 4, /** Retry possible */
    PERF_TXN_CONFLICT = 1 << 5, /** Conflict abort */
    PERF_TXN_CAPACITY_WRITE = 1 << 6, /** Capacity write abort */
    PERF_TXN_CAPACITY_READ = 1 << 7, /** Capacity read abort */

    PERF_TXN_MAX = 1 << 8, /** non-ABI */

    /** bits 32..63 are reserved for the abort code */

    ///PERF_TXN_ABORT_MASK = 0xffffffff << 32,
    PERF_TXN_ABORT_SHIFT = 32
}

/**
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 * ---
 * struct read_format {
 *     { u64 value;
 *       { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *       { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *       { u64 id;           } && PERF_FORMAT_ID
 *     } && !PERF_FORMAT_GROUP
 *
 *     { u64 nr;
 *       { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *       { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *       { u64 value;
 *         { u64 id;         } && PERF_FORMAT_ID
 *       } cntr[nr];
 *     } && PERF_FORMAT_GROUP
 * };
 * ---
 */
enum perf_event_read_format
{
    ///
    PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
    ///
    PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
    ///
    PERF_FORMAT_ID = 1U << 2,
    ///
    PERF_FORMAT_GROUP = 1U << 3,
    PERF_FORMAT_MAX = 1U << 4 /** non-ABI */
}
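/*
 * Example (illustrative sketch): reading one counter opened with
 * attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
 * and without PERF_FORMAT_GROUP. The ReadFormat struct below simply mirrors
 * the layout documented above; its name is not part of the ABI, and fd is an
 * event fd from perf_event_open.
 * ---
 * struct ReadFormat
 * {
 *     ulong value;        // counter value
 *     ulong time_enabled; // && PERF_FORMAT_TOTAL_TIME_ENABLED
 *     ulong time_running; // && PERF_FORMAT_TOTAL_TIME_RUNNING
 * }
 *
 * ReadFormat res;
 * if (read(fd, &res, ReadFormat.sizeof) == ReadFormat.sizeof && res.time_running != 0)
 * {
 *     // scale the raw count when the event was multiplexed
 *     const scaled = cast(ulong)(res.value
 *         * (cast(double) res.time_enabled / res.time_running));
 * }
 * ---
 */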

enum PERF_ATTR_SIZE_VER0 = 64; /** sizeof first published struct */
enum PERF_ATTR_SIZE_VER1 = 72; /** add: config2 */
enum PERF_ATTR_SIZE_VER2 = 80; /** add: branch_sample_type */
enum PERF_ATTR_SIZE_VER3 = 96; /** add: sample_regs_user */
                               /* add: sample_stack_user */
enum PERF_ATTR_SIZE_VER4 = 104; /** add: sample_regs_intr */
enum PERF_ATTR_SIZE_VER5 = 112; /** add: aux_watermark */

/**
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *                    should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr
{
    /**
     * Major type: hardware/software/tracepoint/etc.
     */
    uint type;

    /**
     * Size of the attr structure, for fwd/bwd compat.
     */
    uint size;

    /**
     * Type specific configuration information.
     */
    ulong config;
    ///
    union
    {
        ///
        ulong sample_period;
        ///
        ulong sample_freq;
    }
    ///
    ulong sample_type;
    ///
    ulong read_format;

    // mixin(bitfields!(
    //     ulong, "disabled", 1,
    //     ulong, "inherit", 1,
    //     ulong, "pinned", 1,
    //     ulong, "exclusive", 1,
    //     ulong, "exclude_user", 1,
    //     ulong, "exclude_kernel", 1,
    //     ulong, "exclude_hv", 1,
    //     ulong, "exclude_idle", 1,
    //     ulong, "mmap", 1,
    //     ulong, "comm", 1,
    //     ulong, "freq", 1,
    //     ulong, "inherit_stat", 1,
    //     ulong, "enable_on_exec", 1,
    //     ulong, "task", 1,
    //     ulong, "watermark", 1,
    //     ulong, "precise_ip", 2,
    //     ulong, "mmap_data", 1,
    //     ulong, "sample_id_all", 1,
    //     ulong, "exclude_host", 1,
    //     ulong, "exclude_guest", 1,
    //     ulong, "exclude_callchain_kernel", 1,
    //     ulong, "exclude_callchain_user", 1,
    //     ulong, "mmap2", 1,
    //     ulong, "comm_exec", 1,
    //     ulong, "use_clockid", 1,
    //     ulong, "context_switch", 1,
    //     ulong, "write_backward", 1,
    //     ulong, "namespaces", 1,
    //     ulong, "__reserved_1", 35));
    private ulong perf_event_attr_bitmanip;
    ///
    @property ulong disabled() @safe pure nothrow @nogc const
500 {
501 auto result = (perf_event_attr_bitmanip & 1U) >> 0U;
502 return cast(ulong) result;
503 }
504 ///
    @property void disabled(ulong v) @safe pure nothrow @nogc
506 {
507 assert(v >= disabled_min,
508 "Value is smaller than the minimum value of bitfield 'disabled'");
509 assert(v <= disabled_max,
510 "Value is greater than the maximum value of bitfield 'disabled'");
511 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
512 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1U)) | (
513 (cast(typeof(perf_event_attr_bitmanip)) v << 0U) & 1U));
514 }
515
516 enum ulong disabled_min = cast(ulong) 0U;
517 enum ulong disabled_max = cast(ulong) 1U;
518 ///
    @property ulong inherit() @safe pure nothrow @nogc const
520 {
521 auto result = (perf_event_attr_bitmanip & 2U) >> 1U;
522 return cast(ulong) result;
523 }
524 ///
    @property void inherit(ulong v) @safe pure nothrow @nogc
526 {
527 assert(v >= inherit_min,
528 "Value is smaller than the minimum value of bitfield 'inherit'");
529 assert(v <= inherit_max,
530 "Value is greater than the maximum value of bitfield 'inherit'");
531 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
532 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2U)) | (
533 (cast(typeof(perf_event_attr_bitmanip)) v << 1U) & 2U));
534 }
535
536 enum ulong inherit_min = cast(ulong) 0U;
537 enum ulong inherit_max = cast(ulong) 1U;
538 ///
    @property ulong pinned() @safe pure nothrow @nogc const
540 {
541 auto result = (perf_event_attr_bitmanip & 4U) >> 2U;
542 return cast(ulong) result;
543 }
544 ///
    @property void pinned(ulong v) @safe pure nothrow @nogc
546 {
547 assert(v >= pinned_min,
548 "Value is smaller than the minimum value of bitfield 'pinned'");
549 assert(v <= pinned_max,
550 "Value is greater than the maximum value of bitfield 'pinned'");
551 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
552 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4U)) | (
553 (cast(typeof(perf_event_attr_bitmanip)) v << 2U) & 4U));
554 }
555
556 enum ulong pinned_min = cast(ulong) 0U;
557 enum ulong pinned_max = cast(ulong) 1U;
558 ///
    @property ulong exclusive() @safe pure nothrow @nogc const
560 {
561 auto result = (perf_event_attr_bitmanip & 8U) >> 3U;
562 return cast(ulong) result;
563 }
564 ///
    @property void exclusive(ulong v) @safe pure nothrow @nogc
566 {
567 assert(v >= exclusive_min,
568 "Value is smaller than the minimum value of bitfield 'exclusive'");
569 assert(v <= exclusive_max,
570 "Value is greater than the maximum value of bitfield 'exclusive'");
571 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
572 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8U)) | (
573 (cast(typeof(perf_event_attr_bitmanip)) v << 3U) & 8U));
574 }
575
576 enum ulong exclusive_min = cast(ulong) 0U;
577 enum ulong exclusive_max = cast(ulong) 1U;
578 ///
    @property ulong exclude_user() @safe pure nothrow @nogc const
580 {
581 auto result = (perf_event_attr_bitmanip & 16U) >> 4U;
582 return cast(ulong) result;
583 }
584 ///
    @property void exclude_user(ulong v) @safe pure nothrow @nogc
586 {
587 assert(v >= exclude_user_min,
588 "Value is smaller than the minimum value of bitfield 'exclude_user'");
589 assert(v <= exclude_user_max,
590 "Value is greater than the maximum value of bitfield 'exclude_user'");
591 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
592 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16U)) | (
593 (cast(typeof(perf_event_attr_bitmanip)) v << 4U) & 16U));
594 }
595
596 enum ulong exclude_user_min = cast(ulong) 0U;
597 enum ulong exclude_user_max = cast(ulong) 1U;
598 ///
    @property ulong exclude_kernel() @safe pure nothrow @nogc const
600 {
601 auto result = (perf_event_attr_bitmanip & 32U) >> 5U;
602 return cast(ulong) result;
603 }
604 ///
    @property void exclude_kernel(ulong v) @safe pure nothrow @nogc
606 {
607 assert(v >= exclude_kernel_min,
608 "Value is smaller than the minimum value of bitfield 'exclude_kernel'");
609 assert(v <= exclude_kernel_max,
610 "Value is greater than the maximum value of bitfield 'exclude_kernel'");
611 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
612 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 32U)) | (
613 (cast(typeof(perf_event_attr_bitmanip)) v << 5U) & 32U));
614 }
615
616 enum ulong exclude_kernel_min = cast(ulong) 0U;
617 enum ulong exclude_kernel_max = cast(ulong) 1U;
618 ///
    @property ulong exclude_hv() @safe pure nothrow @nogc const
620 {
621 auto result = (perf_event_attr_bitmanip & 64U) >> 6U;
622 return cast(ulong) result;
623 }
624 ///
    @property void exclude_hv(ulong v) @safe pure nothrow @nogc
626 {
627 assert(v >= exclude_hv_min,
628 "Value is smaller than the minimum value of bitfield 'exclude_hv'");
629 assert(v <= exclude_hv_max,
630 "Value is greater than the maximum value of bitfield 'exclude_hv'");
631 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
632 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 64U)) | (
633 (cast(typeof(perf_event_attr_bitmanip)) v << 6U) & 64U));
634 }
635
636 enum ulong exclude_hv_min = cast(ulong) 0U;
637 enum ulong exclude_hv_max = cast(ulong) 1U;
638 ///
    @property ulong exclude_idle() @safe pure nothrow @nogc const
640 {
641 auto result = (perf_event_attr_bitmanip & 128U) >> 7U;
642 return cast(ulong) result;
643 }
644 ///
    @property void exclude_idle(ulong v) @safe pure nothrow @nogc
646 {
647 assert(v >= exclude_idle_min,
648 "Value is smaller than the minimum value of bitfield 'exclude_idle'");
649 assert(v <= exclude_idle_max,
650 "Value is greater than the maximum value of bitfield 'exclude_idle'");
651 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
652 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 128U)) | (
653 (cast(typeof(perf_event_attr_bitmanip)) v << 7U) & 128U));
654 }
655
656 enum ulong exclude_idle_min = cast(ulong) 0U;
657 enum ulong exclude_idle_max = cast(ulong) 1U;
658 ///
    @property ulong mmap() @safe pure nothrow @nogc const
660 {
661 auto result = (perf_event_attr_bitmanip & 256U) >> 8U;
662 return cast(ulong) result;
663 }
664 ///
    @property void mmap(ulong v) @safe pure nothrow @nogc
666 {
667 assert(v >= mmap_min, "Value is smaller than the minimum value of bitfield 'mmap'");
668 assert(v <= mmap_max, "Value is greater than the maximum value of bitfield 'mmap'");
669 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
670 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 256U)) | (
671 (cast(typeof(perf_event_attr_bitmanip)) v << 8U) & 256U));
672 }
673
674 enum ulong mmap_min = cast(ulong) 0U;
675 enum ulong mmap_max = cast(ulong) 1U;
676 ///
    @property ulong comm() @safe pure nothrow @nogc const
678 {
679 auto result = (perf_event_attr_bitmanip & 512U) >> 9U;
680 return cast(ulong) result;
681 }
682 ///
    @property void comm(ulong v) @safe pure nothrow @nogc
684 {
685 assert(v >= comm_min, "Value is smaller than the minimum value of bitfield 'comm'");
686 assert(v <= comm_max, "Value is greater than the maximum value of bitfield 'comm'");
687 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
688 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 512U)) | (
689 (cast(typeof(perf_event_attr_bitmanip)) v << 9U) & 512U));
690 }
691
692 enum ulong comm_min = cast(ulong) 0U;
693 enum ulong comm_max = cast(ulong) 1U;
694 ///
    @property ulong freq() @safe pure nothrow @nogc const
696 {
697 auto result = (perf_event_attr_bitmanip & 1024U) >> 10U;
698 return cast(ulong) result;
699 }
700 ///
    @property void freq(ulong v) @safe pure nothrow @nogc
702 {
703 assert(v >= freq_min, "Value is smaller than the minimum value of bitfield 'freq'");
704 assert(v <= freq_max, "Value is greater than the maximum value of bitfield 'freq'");
705 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
706 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1024U)) | (
707 (cast(typeof(perf_event_attr_bitmanip)) v << 10U) & 1024U));
708 }
709
710 enum ulong freq_min = cast(ulong) 0U;
711 enum ulong freq_max = cast(ulong) 1U;
712 ///
    @property ulong inherit_stat() @safe pure nothrow @nogc const
714 {
715 auto result = (perf_event_attr_bitmanip & 2048U) >> 11U;
716 return cast(ulong) result;
717 }
718 ///
    @property void inherit_stat(ulong v) @safe pure nothrow @nogc
720 {
721 assert(v >= inherit_stat_min,
722 "Value is smaller than the minimum value of bitfield 'inherit_stat'");
723 assert(v <= inherit_stat_max,
724 "Value is greater than the maximum value of bitfield 'inherit_stat'");
725 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
726 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2048U)) | (
727 (cast(typeof(perf_event_attr_bitmanip)) v << 11U) & 2048U));
728 }
729
730 enum ulong inherit_stat_min = cast(ulong) 0U;
731 enum ulong inherit_stat_max = cast(ulong) 1U;
732 ///
    @property ulong enable_on_exec() @safe pure nothrow @nogc const
734 {
735 auto result = (perf_event_attr_bitmanip & 4096U) >> 12U;
736 return cast(ulong) result;
737 }
738 ///
    @property void enable_on_exec(ulong v) @safe pure nothrow @nogc
740 {
741 assert(v >= enable_on_exec_min,
742 "Value is smaller than the minimum value of bitfield 'enable_on_exec'");
743 assert(v <= enable_on_exec_max,
744 "Value is greater than the maximum value of bitfield 'enable_on_exec'");
745 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
746 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4096U)) | (
747 (cast(typeof(perf_event_attr_bitmanip)) v << 12U) & 4096U));
748 }
749
750 enum ulong enable_on_exec_min = cast(ulong) 0U;
751 enum ulong enable_on_exec_max = cast(ulong) 1U;
752 ///
    @property ulong task() @safe pure nothrow @nogc const
754 {
755 auto result = (perf_event_attr_bitmanip & 8192U) >> 13U;
756 return cast(ulong) result;
757 }
758 ///
    @property void task(ulong v) @safe pure nothrow @nogc
760 {
761 assert(v >= task_min, "Value is smaller than the minimum value of bitfield 'task'");
762 assert(v <= task_max, "Value is greater than the maximum value of bitfield 'task'");
763 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
764 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8192U)) | (
765 (cast(typeof(perf_event_attr_bitmanip)) v << 13U) & 8192U));
766 }
767
768 enum ulong task_min = cast(ulong) 0U;
769 enum ulong task_max = cast(ulong) 1U;
770 ///
    @property ulong watermark() @safe pure nothrow @nogc const
772 {
773 auto result = (perf_event_attr_bitmanip & 16384U) >> 14U;
774 return cast(ulong) result;
775 }
776 ///
    @property void watermark(ulong v) @safe pure nothrow @nogc
778 {
779 assert(v >= watermark_min,
780 "Value is smaller than the minimum value of bitfield 'watermark'");
781 assert(v <= watermark_max,
782 "Value is greater than the maximum value of bitfield 'watermark'");
783 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
784 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16384U)) | (
785 (cast(typeof(perf_event_attr_bitmanip)) v << 14U) & 16384U));
786 }
787
788 enum ulong watermark_min = cast(ulong) 0U;
789 enum ulong watermark_max = cast(ulong) 1U;
790 ///
    @property ulong precise_ip() @safe pure nothrow @nogc const
792 {
793 auto result = (perf_event_attr_bitmanip & 98304U) >> 15U;
794 return cast(ulong) result;
795 }
796 ///
    @property void precise_ip(ulong v) @safe pure nothrow @nogc
798 {
799 assert(v >= precise_ip_min,
800 "Value is smaller than the minimum value of bitfield 'precise_ip'");
801 assert(v <= precise_ip_max,
802 "Value is greater than the maximum value of bitfield 'precise_ip'");
803 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
804 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 98304U)) | (
805 (cast(typeof(perf_event_attr_bitmanip)) v << 15U) & 98304U));
806 }
807
808 enum ulong precise_ip_min = cast(ulong) 0U;
809 enum ulong precise_ip_max = cast(ulong) 3U;
810 ///
    @property ulong mmap_data() @safe pure nothrow @nogc const
812 {
813 auto result = (perf_event_attr_bitmanip & 131072U) >> 17U;
814 return cast(ulong) result;
815 }
816 ///
    @property void mmap_data(ulong v) @safe pure nothrow @nogc
818 {
819 assert(v >= mmap_data_min,
820 "Value is smaller than the minimum value of bitfield 'mmap_data'");
821 assert(v <= mmap_data_max,
822 "Value is greater than the maximum value of bitfield 'mmap_data'");
823 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
824 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 131072U)) | (
825 (cast(typeof(perf_event_attr_bitmanip)) v << 17U) & 131072U));
826 }
827
828 enum ulong mmap_data_min = cast(ulong) 0U;
829 enum ulong mmap_data_max = cast(ulong) 1U;
830 ///
    @property ulong sample_id_all() @safe pure nothrow @nogc const
832 {
833 auto result = (perf_event_attr_bitmanip & 262144U) >> 18U;
834 return cast(ulong) result;
835 }
836 ///
    @property void sample_id_all(ulong v) @safe pure nothrow @nogc
838 {
839 assert(v >= sample_id_all_min,
840 "Value is smaller than the minimum value of bitfield 'sample_id_all'");
841 assert(v <= sample_id_all_max,
842 "Value is greater than the maximum value of bitfield 'sample_id_all'");
843 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
844 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 262144U)) | (
845 (cast(typeof(perf_event_attr_bitmanip)) v << 18U) & 262144U));
846 }
847
848 enum ulong sample_id_all_min = cast(ulong) 0U;
849 enum ulong sample_id_all_max = cast(ulong) 1U;
850 ///
    @property ulong exclude_host() @safe pure nothrow @nogc const
852 {
853 auto result = (perf_event_attr_bitmanip & 524288U) >> 19U;
854 return cast(ulong) result;
855 }
856 ///
    @property void exclude_host(ulong v) @safe pure nothrow @nogc
858 {
859 assert(v >= exclude_host_min,
860 "Value is smaller than the minimum value of bitfield 'exclude_host'");
861 assert(v <= exclude_host_max,
862 "Value is greater than the maximum value of bitfield 'exclude_host'");
863 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
864 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 524288U)) | (
865 (cast(typeof(perf_event_attr_bitmanip)) v << 19U) & 524288U));
866 }
867
868 enum ulong exclude_host_min = cast(ulong) 0U;
869 enum ulong exclude_host_max = cast(ulong) 1U;
870 ///
    @property ulong exclude_guest() @safe pure nothrow @nogc const
872 {
873 auto result = (perf_event_attr_bitmanip & 1048576U) >> 20U;
874 return cast(ulong) result;
875 }
876 ///
    @property void exclude_guest(ulong v) @safe pure nothrow @nogc
878 {
879 assert(v >= exclude_guest_min,
880 "Value is smaller than the minimum value of bitfield 'exclude_guest'");
881 assert(v <= exclude_guest_max,
882 "Value is greater than the maximum value of bitfield 'exclude_guest'");
883 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
884 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1048576U)) | (
885 (cast(typeof(perf_event_attr_bitmanip)) v << 20U) & 1048576U));
886 }
887
888 enum ulong exclude_guest_min = cast(ulong) 0U;
889 enum ulong exclude_guest_max = cast(ulong) 1U;
890 ///
    @property ulong exclude_callchain_kernel() @safe pure nothrow @nogc const
892 {
893 auto result = (perf_event_attr_bitmanip & 2097152U) >> 21U;
894 return cast(ulong) result;
895 }
896 ///
    @property void exclude_callchain_kernel(ulong v) @safe pure nothrow @nogc
898 {
899 assert(v >= exclude_callchain_kernel_min,
900 "Value is smaller than the minimum value of bitfield 'exclude_callchain_kernel'");
901 assert(v <= exclude_callchain_kernel_max,
902 "Value is greater than the maximum value of bitfield 'exclude_callchain_kernel'");
903 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
904 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2097152U)) | (
905 (cast(typeof(perf_event_attr_bitmanip)) v << 21U) & 2097152U));
906 }
907
908 enum ulong exclude_callchain_kernel_min = cast(ulong) 0U;
909 enum ulong exclude_callchain_kernel_max = cast(ulong) 1U;
910 ///
    @property ulong exclude_callchain_user() @safe pure nothrow @nogc const
912 {
913 auto result = (perf_event_attr_bitmanip & 4194304U) >> 22U;
914 return cast(ulong) result;
915 }
916 ///
    @property void exclude_callchain_user(ulong v) @safe pure nothrow @nogc
918 {
919 assert(v >= exclude_callchain_user_min,
920 "Value is smaller than the minimum value of bitfield 'exclude_callchain_user'");
921 assert(v <= exclude_callchain_user_max,
922 "Value is greater than the maximum value of bitfield 'exclude_callchain_user'");
923 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
924 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4194304U)) | (
925 (cast(typeof(perf_event_attr_bitmanip)) v << 22U) & 4194304U));
926 }
927
928 enum ulong exclude_callchain_user_min = cast(ulong) 0U;
929 enum ulong exclude_callchain_user_max = cast(ulong) 1U;
930 ///
    @property ulong mmap2() @safe pure nothrow @nogc const
932 {
933 auto result = (perf_event_attr_bitmanip & 8388608U) >> 23U;
934 return cast(ulong) result;
935 }
936 ///
    @property void mmap2(ulong v) @safe pure nothrow @nogc
938 {
939 assert(v >= mmap2_min,
940 "Value is smaller than the minimum value of bitfield 'mmap2'");
941 assert(v <= mmap2_max,
942 "Value is greater than the maximum value of bitfield 'mmap2'");
943 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
944 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8388608U)) | (
945 (cast(typeof(perf_event_attr_bitmanip)) v << 23U) & 8388608U));
946 }
947
948 enum ulong mmap2_min = cast(ulong) 0U;
949 enum ulong mmap2_max = cast(ulong) 1U;
950 ///
    @property ulong comm_exec() @safe pure nothrow @nogc const
952 {
953 auto result = (perf_event_attr_bitmanip & 16777216U) >> 24U;
954 return cast(ulong) result;
955 }
956 ///
    @property void comm_exec(ulong v) @safe pure nothrow @nogc
958 {
959 assert(v >= comm_exec_min,
960 "Value is smaller than the minimum value of bitfield 'comm_exec'");
961 assert(v <= comm_exec_max,
962 "Value is greater than the maximum value of bitfield 'comm_exec'");
963 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
964 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16777216U)) | (
965 (cast(typeof(perf_event_attr_bitmanip)) v << 24U) & 16777216U));
966 }
967
968 enum ulong comm_exec_min = cast(ulong) 0U;
969 enum ulong comm_exec_max = cast(ulong) 1U;
970 ///
    @property ulong use_clockid() @safe pure nothrow @nogc const
972 {
973 auto result = (perf_event_attr_bitmanip & 33554432U) >> 25U;
974 return cast(ulong) result;
975 }
976 ///
    @property void use_clockid(ulong v) @safe pure nothrow @nogc
978 {
979 assert(v >= use_clockid_min,
980 "Value is smaller than the minimum value of bitfield 'use_clockid'");
981 assert(v <= use_clockid_max,
982 "Value is greater than the maximum value of bitfield 'use_clockid'");
983 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
984 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 33554432U)) | (
985 (cast(typeof(perf_event_attr_bitmanip)) v << 25U) & 33554432U));
986 }
987
988 enum ulong use_clockid_min = cast(ulong) 0U;
989 enum ulong use_clockid_max = cast(ulong) 1U;
990 ///
    @property ulong context_switch() @safe pure nothrow @nogc const
992 {
993 auto result = (perf_event_attr_bitmanip & 67108864U) >> 26U;
994 return cast(ulong) result;
995 }
996 ///
    @property void context_switch(ulong v) @safe pure nothrow @nogc
998 {
999 assert(v >= context_switch_min,
1000 "Value is smaller than the minimum value of bitfield 'context_switch'");
1001 assert(v <= context_switch_max,
1002 "Value is greater than the maximum value of bitfield 'context_switch'");
1003 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
1004 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 67108864U)) | (
1005 (cast(typeof(perf_event_attr_bitmanip)) v << 26U) & 67108864U));
1006 }
1007
1008 enum ulong context_switch_min = cast(ulong) 0U;
1009 enum ulong context_switch_max = cast(ulong) 1U;
1010 ///
    @property ulong write_backward() @safe pure nothrow @nogc const
1012 {
1013 auto result = (perf_event_attr_bitmanip & 134217728U) >> 27U;
1014 return cast(ulong) result;
1015 }
1016 ///
    @property void write_backward(ulong v) @safe pure nothrow @nogc
1018 {
1019 assert(v >= write_backward_min,
1020 "Value is smaller than the minimum value of bitfield 'write_backward'");
1021 assert(v <= write_backward_max,
1022 "Value is greater than the maximum value of bitfield 'write_backward'");
1023 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
1024 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 134217728U)) | (
1025 (cast(typeof(perf_event_attr_bitmanip)) v << 27U) & 134217728U));
1026 }
1027
1028 enum ulong write_backward_min = cast(ulong) 0U;
1029 enum ulong write_backward_max = cast(ulong) 1U;
1030 ///
    @property ulong namespaces() @safe pure nothrow @nogc const
1032 {
1033 auto result = (perf_event_attr_bitmanip & 268435456U) >> 28U;
1034 return cast(ulong) result;
1035 }
1036 ///
    @property void namespaces(ulong v) @safe pure nothrow @nogc
1038 {
1039 assert(v >= namespaces_min,
1040 "Value is smaller than the minimum value of bitfield 'namespaces'");
1041 assert(v <= namespaces_max,
1042 "Value is greater than the maximum value of bitfield 'namespaces'");
1043 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
1044 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 268435456U)) | (
1045 (cast(typeof(perf_event_attr_bitmanip)) v << 28U) & 268435456U));
1046 }
1047
1048 enum ulong namespaces_min = cast(ulong) 0U;
1049 enum ulong namespaces_max = cast(ulong) 1U;
1050 ///
    @property ulong __reserved_1() @safe pure nothrow @nogc const
1052 {
1053 auto result = (perf_event_attr_bitmanip & 18446744073172680704UL) >> 29U;
1054 return cast(ulong) result;
1055 }
1056 ///
    @property void __reserved_1(ulong v) @safe pure nothrow @nogc
1058 {
1059 assert(v >= __reserved_1_min,
1060 "Value is smaller than the minimum value of bitfield '__reserved_1'");
1061 assert(v <= __reserved_1_max,
1062 "Value is greater than the maximum value of bitfield '__reserved_1'");
1063 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
1064 (perf_event_attr_bitmanip & (-1 - cast(
1065 typeof(perf_event_attr_bitmanip)) 18446744073172680704UL)) | (
1066 (cast(typeof(perf_event_attr_bitmanip)) v << 29U) & 18446744073172680704UL));
1067 }
1068
1069 enum ulong __reserved_1_min = cast(ulong) 0U;
1070 enum ulong __reserved_1_max = cast(ulong) 34359738367UL;
    ///
    union
    {
        uint wakeup_events; /** wakeup every n events */
        uint wakeup_watermark; /** bytes before wakeup */
    }
    ///
    uint bp_type;

    union
    {
        ///
        ulong bp_addr;
        ulong config1; /** extension of config */
    }

    union
    {
        ///
        ulong bp_len;
        ulong config2; /** extension of config1 */
    }

    ulong branch_sample_type; /** enum perf_branch_sample_type */

    /**
     * Defines set of user regs to dump on samples.
     * See asm/perf_regs.h for details.
     */
    ulong sample_regs_user;

    /**
     * Defines size of the user stack to dump on samples.
     */
    uint sample_stack_user;
    ///
    int clockid;

    /**
     * Defines set of regs to dump for each sample
     * state captured on:
     *  - precise = 0: PMU interrupt
     *  - precise > 0: sampled instruction
     *
     * See asm/perf_regs.h for details.
     */
    ulong sample_regs_intr;

    /**
     * Wakeup watermark for AUX area
     */
    uint aux_watermark;
    ///
    ushort sample_max_stack;
    /** align to __u64 */
    ushort __reserved_2;
}
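/*
 * Example (illustrative sketch): the bit-field accessors above read and write
 * single flags in the packed word and are used like ordinary fields. For this
 * declaration perf_event_attr.sizeof matches PERF_ATTR_SIZE_VER5 (112), which
 * is the value attr.size is normally set to.
 * ---
 * static assert(perf_event_attr.sizeof == PERF_ATTR_SIZE_VER5);
 *
 * perf_event_attr attr;
 * attr.size = perf_event_attr.sizeof;
 * attr.disabled = 1;
 * attr.enable_on_exec = 1; // start counting when the child process exec()s
 * attr.inherit = 1;        // also count in child tasks
 * ---
 */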
///
extern (D) auto perf_flags(T)(auto ref T attr)
{
    return *(&attr.read_format + 1);
}

/**
 * Ioctls that can be done on a perf event fd:
 */
enum PERF_EVENT_IOC_ENABLE = _IO('$', 0);
///
enum PERF_EVENT_IOC_DISABLE = _IO('$', 1);
///
enum PERF_EVENT_IOC_REFRESH = _IO('$', 2);
///
enum PERF_EVENT_IOC_RESET = _IO('$', 3);
///
enum PERF_EVENT_IOC_PERIOD = _IOW!ulong('$', 4);
///
enum PERF_EVENT_IOC_SET_OUTPUT = _IO('$', 5);
///
enum PERF_EVENT_IOC_SET_FILTER = _IOW!(char*)('$', 6);
///
enum PERF_EVENT_IOC_ID = _IOR!(ulong*)('$', 7);
///
enum PERF_EVENT_IOC_SET_BPF = _IOW!uint('$', 8);
///
enum PERF_EVENT_IOC_PAUSE_OUTPUT = _IOW!uint('$', 9);

///
enum perf_event_ioc_flags
{
    PERF_IOC_FLAG_GROUP = 1U << 0
}
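/*
 * Example (illustrative sketch): the usual reset/enable/disable sequence on
 * an event fd returned by perf_event_open, followed by a plain read() of the
 * counter (read_format == 0, so a single u64 is returned). Error checking is
 * omitted.
 * ---
 * ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 * ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 * // ... code under measurement ...
 * ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *
 * ulong count;
 * read(fd, &count, count.sizeof); // core.sys.posix.unistd.read
 * ---
 */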
1162
1163 /**
1164 * Structure of the page that can be mapped via mmap
1165 */
1166 struct perf_event_mmap_page
1167 {
1168 uint version_; /** version number of this structure */
1169 uint compat_version; /** lowest version this is compat with */
1170
1171 /**
1172 * Bits needed to read the hw events in user-space.
1173 * ---
1174 * u32 seq, time_mult, time_shift, index, width;
1175 * u64 count, enabled, running;
1176 * u64 cyc, time_offset;
1177 * s64 pmc = 0;
1178 *
1179 * do {
1180 * seq = pc->lock;
1181 * barrier()
1182 *
1183 * enabled = pc->time_enabled;
1184 * running = pc->time_running;
1185 *
1186 * if (pc->cap_usr_time && enabled != running) {
1187 * cyc = rdtsc();
1188 * time_offset = pc->time_offset;
1189 * time_mult = pc->time_mult;
1190 * time_shift = pc->time_shift;
1191 * }
1192 *
1193 * index = pc->index;
1194 * count = pc->offset;
1195 * if (pc->cap_user_rdpmc && index) {
1196 * width = pc->pmc_width;
1197 * pmc = rdpmc(index - 1);
1198 * }
1199 *
1200 * barrier();
1201 * } while (pc->lock != seq);
1202 * ---
1203 * NOTE: for obvious reason this only works on self-monitoring
1204 * processes.
1205 */
1206 uint lock; /** seqlock for synchronization */
1207 uint index; /** hardware event identifier */
1208 long offset; /** add to hardware event value */
1209 ulong time_enabled; /** time event active */
1210 ulong time_running; /** time event on cpu */
1211 ///
1212 union
1213 {
1214 ///
1215 ulong capabilities;
1216
1217 struct
1218 {
1219 /* mixin(bitfields!(ulong, "cap_bit0", 1, ulong, "cap_bit0_is_deprecated", 1, ulong,
1220 "cap_user_rdpmc", 1, ulong, "cap_user_time", 1, ulong,
1221 "cap_user_time_zero", 1, ulong, "cap_____res", 59)); */
1222
1223 private ulong mmap_page_bitmanip;
1224 ///
            @property ulong cap_bit0() @safe pure nothrow @nogc const
1226 {
1227 auto result = (mmap_page_bitmanip & 1U) >> 0U;
1228 return cast(ulong) result;
1229 }
1230 ///
            @property void cap_bit0(ulong v) @safe pure nothrow @nogc
1232 {
1233 assert(v >= cap_bit0_min,
1234 "Value is smaller than the minimum value of bitfield 'cap_bit0'");
1235 assert(v <= cap_bit0_max,
1236 "Value is greater than the maximum value of bitfield 'cap_bit0'");
1237 mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
1238 (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 1U)) | (
1239 (cast(typeof(mmap_page_bitmanip)) v << 0U) & 1U));
1240 }
1241
1242 enum ulong cap_bit0_min = cast(ulong) 0U;
1243 enum ulong cap_bit0_max = cast(ulong) 1U;
1244 ///
            @property ulong cap_bit0_is_deprecated() @safe pure nothrow @nogc const
1246 {
1247 auto result = (mmap_page_bitmanip & 2U) >> 1U;
1248 return cast(ulong) result;
1249 }
1250 ///
            @property void cap_bit0_is_deprecated(ulong v) @safe pure nothrow @nogc
1252 {
1253 assert(v >= cap_bit0_is_deprecated_min,
1254 "Value is smaller than the minimum value of bitfield 'cap_bit0_is_deprecated'");
1255 assert(v <= cap_bit0_is_deprecated_max,
1256 "Value is greater than the maximum value of bitfield 'cap_bit0_is_deprecated'");
1257 mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
1258 (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 2U)) | (
1259 (cast(typeof(mmap_page_bitmanip)) v << 1U) & 2U));
1260 }
1261
1262 enum ulong cap_bit0_is_deprecated_min = cast(ulong) 0U;
1263 enum ulong cap_bit0_is_deprecated_max = cast(ulong) 1U;
1264 ///
            @property ulong cap_user_rdpmc() @safe pure nothrow @nogc const
1266 {
1267 auto result = (mmap_page_bitmanip & 4U) >> 2U;
1268 return cast(ulong) result;
1269 }
1270 ///
            @property void cap_user_rdpmc(ulong v) @safe pure nothrow @nogc
1272 {
1273 assert(v >= cap_user_rdpmc_min,
1274 "Value is smaller than the minimum value of bitfield 'cap_user_rdpmc'");
1275 assert(v <= cap_user_rdpmc_max,
1276 "Value is greater than the maximum value of bitfield 'cap_user_rdpmc'");
1277 mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
1278 (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 4U)) | (
1279 (cast(typeof(mmap_page_bitmanip)) v << 2U) & 4U));
1280 }
1281
1282 enum ulong cap_user_rdpmc_min = cast(ulong) 0U;
1283 enum ulong cap_user_rdpmc_max = cast(ulong) 1U;
1284 ///
            @property ulong cap_user_time() @safe pure nothrow @nogc const
1286 {
1287 auto result = (mmap_page_bitmanip & 8U) >> 3U;
1288 return cast(ulong) result;
1289 }
1290 ///
            @property void cap_user_time(ulong v) @safe pure nothrow @nogc
1292 {
1293 assert(v >= cap_user_time_min,
1294 "Value is smaller than the minimum value of bitfield 'cap_user_time'");
1295 assert(v <= cap_user_time_max,
1296 "Value is greater than the maximum value of bitfield 'cap_user_time'");
1297 mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
1298 (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 8U)) | (
1299 (cast(typeof(mmap_page_bitmanip)) v << 3U) & 8U));
1300 }
1301
1302 enum ulong cap_user_time_min = cast(ulong) 0U;
1303 enum ulong cap_user_time_max = cast(ulong) 1U;
1304 ///
            @property ulong cap_user_time_zero() @safe pure nothrow @nogc const
1306 {
1307 auto result = (mmap_page_bitmanip & 16U) >> 4U;
1308 return cast(ulong) result;
1309 }
1310 ///
            @property void cap_user_time_zero(ulong v) @safe pure nothrow @nogc
1312 {
1313 assert(v >= cap_user_time_zero_min,
1314 "Value is smaller than the minimum value of bitfield 'cap_user_time_zero'");
1315 assert(v <= cap_user_time_zero_max,
1316 "Value is greater than the maximum value of bitfield 'cap_user_time_zero'");
1317 mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
1318 (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 16U)) | (
1319 (cast(typeof(mmap_page_bitmanip)) v << 4U) & 16U));
1320 }
1321
1322 enum ulong cap_user_time_zero_min = cast(ulong) 0U;
1323 enum ulong cap_user_time_zero_max = cast(ulong) 1U;
1324 ///
            @property ulong cap_____res() @safe pure nothrow @nogc const
1326 {
1327 auto result = (mmap_page_bitmanip & 18446744073709551584UL) >> 5U;
1328 return cast(ulong) result;
1329 }
1330 ///
            @property void cap_____res(ulong v) @safe pure nothrow @nogc
1332 {
1333 assert(v >= cap_____res_min,
1334 "Value is smaller than the minimum value of bitfield 'cap_____res'");
1335 assert(v <= cap_____res_max,
1336 "Value is greater than the maximum value of bitfield 'cap_____res'");
1337 mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))((mmap_page_bitmanip & (
1338 -1 - cast(typeof(mmap_page_bitmanip)) 18446744073709551584UL)) | (
1339 (cast(typeof(mmap_page_bitmanip)) v << 5U) & 18446744073709551584UL));
1340 }
1341
1342 enum ulong cap_____res_min = cast(ulong) 0U;
1343 enum ulong cap_____res_max = cast(ulong) 576460752303423487UL;
1344 }
1345 }
1346
1347 /**
1348 * If cap_user_rdpmc this field provides the bit-width of the value
1349 * read using the rdpmc() or equivalent instruction. This can be used
1350 * to sign extend the result like:
1351 *
1352 * pmc <<= 64 - width;
1353 * pmc >>= 64 - width; // signed shift right
1354 * count += pmc;
1355 */
1356 ushort pmc_width;
1357
1358 /**
1359 * If cap_usr_time the below fields can be used to compute the time
1360 * delta since time_enabled (in ns) using rdtsc or similar.
1361 *
1362 * u64 quot, rem;
1363 * u64 delta;
1364 *
1365 * quot = (cyc >> time_shift);
1366 * rem = cyc & (((u64)1 << time_shift) - 1);
1367 * delta = time_offset + quot * time_mult +
1368 * ((rem * time_mult) >> time_shift);
1369 *
1370 * Where time_offset,time_mult,time_shift and cyc are read in the
1371 * seqcount loop described above. This delta can then be added to
1372 * enabled and possible running (if index), improving the scaling:
1373 *
1374 * enabled += delta;
1375 * if (index)
1376 * running += delta;
1377 *
1378 * quot = count / running;
1379 * rem = count % running;
1380 * count = quot * enabled + (rem * enabled) / running;
1381 */
1382 ushort time_shift;
1383 ///
1384 uint time_mult;
1385 ///
1386 ulong time_offset;
1387 /**
1388 * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
1389 * from sample timestamps.
1390 *
1391 * time = timestamp - time_zero;
1392 * quot = time / time_mult;
1393 * rem = time % time_mult;
1394 * cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
1395 *
1396 * And vice versa:
1397 *
1398 * quot = cyc >> time_shift;
1399 * rem = cyc & (((u64)1 << time_shift) - 1);
1400 * timestamp = time_zero + quot * time_mult +
1401 * ((rem * time_mult) >> time_shift);
1402 */
1403 ulong time_zero;
1404 uint size; /** Header size up to __reserved[] fields. */
1405
1406 /**
1407 * Hole for extension of the self monitor capabilities
1408 */
1409
1410 ubyte[948] __reserved; /** align to 1k. */
1411
1412 /**
1413 * Control data for the mmap() data buffer.
1414 *
1415 * User-space reading the @data_head value should issue an smp_rmb(),
1416 * after reading this value.
1417 *
1418 * When the mapping is PROT_WRITE the @data_tail value should be
     * written by userspace to reflect the last read data, after issuing
1420 * an smp_mb() to separate the data read from the ->data_tail store.
1421 * In this case the kernel will not over-write unread data.
1422 *
1423 * See perf_output_put_handle() for the data ordering.
1424 *
1425 * data_{offset,size} indicate the location and size of the perf record
1426 * buffer within the mmapped area.
1427 */
1428 ulong data_head; /** head in the data section */
1429 ulong data_tail; /** user-space written tail */
1430 ulong data_offset; /** where the buffer starts */
1431 ulong data_size; /** data buffer size */
1432
1433 /**
1434 * AUX area is defined by aux_{offset,size} fields that should be set
1435 * by the userspace, so that
1436 * ---
1437 * aux_offset >= data_offset + data_size
1438 * ---
1439 * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
1440 *
1441 * Ring buffer pointers aux_{head,tail} have the same semantics as
1442 * data_{head,tail} and same ordering rules apply.
1443 */
1444 ulong aux_head;
1445 ///
1446 ulong aux_tail;
1447 ///
1448 ulong aux_offset;
1449 ///
1450 ulong aux_size;
1451 }
///
enum PERF_RECORD_MISC_CPUMODE_MASK = 7 << 0;
///
enum PERF_RECORD_MISC_CPUMODE_UNKNOWN = 0 << 0;
///
enum PERF_RECORD_MISC_KERNEL = 1 << 0;
///
enum PERF_RECORD_MISC_USER = 2 << 0;
///
enum PERF_RECORD_MISC_HYPERVISOR = 3 << 0;
///
enum PERF_RECORD_MISC_GUEST_KERNEL = 4 << 0;
///
enum PERF_RECORD_MISC_GUEST_USER = 5 << 0;

/**
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
enum PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT = 1 << 12;
/**
 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
 * different events so can reuse the same bit position.
 * Ditto PERF_RECORD_MISC_SWITCH_OUT.
 */
enum PERF_RECORD_MISC_MMAP_DATA = 1 << 13;
///
enum PERF_RECORD_MISC_COMM_EXEC = 1 << 13;
///
enum PERF_RECORD_MISC_SWITCH_OUT = 1 << 13;
/**
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
enum PERF_RECORD_MISC_EXACT_IP = 1 << 14;
/**
 * Reserve the last bit to indicate some extended misc field
 */
enum PERF_RECORD_MISC_EXT_RESERVED = 1 << 15;
///
struct perf_event_header
{
    ///
    uint type;
    ///
    ushort misc;
    ///
    ushort size;
}
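/*
 * Example (illustrative sketch): walking the records in an mmap()ed ring
 * buffer using perf_event_mmap_page and perf_event_header. Assumes an event
 * fd from perf_event_open, core.sys.posix.sys.mman for mmap, and that no
 * record wraps around the buffer end; real code also needs the memory
 * barriers described in the data_head/data_tail notes above.
 * ---
 * enum pageSize = 4096;           // assumed page size
 * void* base = mmap(null, (1 + 8) * pageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 * auto meta = cast(perf_event_mmap_page*) base;
 *
 * ulong tail = meta.data_tail;
 * const head = meta.data_head;    // a load barrier belongs here
 * while (tail < head)
 * {
 *     auto rec = cast(perf_event_header*)(cast(ubyte*) base + meta.data_offset
 *         + (tail % meta.data_size));
 *     // rec.type is a perf_event_type value, e.g. PERF_RECORD_SAMPLE
 *     tail += rec.size;
 * }
 * meta.data_tail = tail;          // tell the kernel how far user space has read
 * ---
 */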
///
struct perf_ns_link_info
{
    ///
    ulong dev;
    ///
    ulong ino;
}

enum
{
    ///
    NET_NS_INDEX = 0,
    ///
    UTS_NS_INDEX = 1,
    ///
    IPC_NS_INDEX = 2,
    ///
    PID_NS_INDEX = 3,
    ///
    USER_NS_INDEX = 4,
    ///
    MNT_NS_INDEX = 5,
    ///
    CGROUP_NS_INDEX = 6,
    NR_NAMESPACES = 7 /** number of available namespaces */
}
1528 ///
1529 enum perf_event_type
1530 {
1531 /**
1532 * If perf_event_attr.sample_id_all is set then all event types will
1533 * have the sample_type selected fields related to where/when
1534 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
1535 * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
1536 * just after the perf_event_header and the fields already present for
1537 * the existing fields, i.e. at the end of the payload. That way a newer
1538 * perf.data file will be supported by older perf tools, with these new
1539 * optional fields being ignored.
1540 * ---
1541 * struct sample_id {
1542 * { u32 pid, tid; } && PERF_SAMPLE_TID
1543 * { u64 time; } && PERF_SAMPLE_TIME
1544 * { u64 id; } && PERF_SAMPLE_ID
1545 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
1546 * { u32 cpu, res; } && PERF_SAMPLE_CPU
1547 * { u64 id; } && PERF_SAMPLE_IDENTIFIER
1548 * } && perf_event_attr::sample_id_all
1549 * ---
1550 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
1551 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
1552 * relative to header.size.
1553 */
1554
1555 /*
1556 * The MMAP events record the PROT_EXEC mappings so that we can
1557 * correlate userspace IPs to code. They have the following structure:
1558 * ---
1559 * struct {
1560 * struct perf_event_header header;
1561 *
1562 * u32 pid, tid;
1563 * u64 addr;
1564 * u64 len;
1565 * u64 pgoff;
1566 * char filename[];
1567 * struct sample_id sample_id;
1568 * };
1569 * ---
1570 */
1571 PERF_RECORD_MMAP = 1,
1572
1573 /**
1574 * ---
1575 * struct {
1576 * struct perf_event_header header;
1577 * u64 id;
1578 * u64 lost;
1579 * struct sample_id sample_id;
1580 * };
1581 * ---
1582 */
1583 PERF_RECORD_LOST = 2,
1584
1585 /**
1586 * ---
1587 * struct {
1588 * struct perf_event_header header;
1589 *
1590 * u32 pid, tid;
1591 * char comm[];
1592 * struct sample_id sample_id;
1593 * };
1594 * ---
1595 */
1596 PERF_RECORD_COMM = 3,
1597
1598 /**
1599 * ---
1600 * struct {
1601 * struct perf_event_header header;
1602 * u32 pid, ppid;
1603 * u32 tid, ptid;
1604 * u64 time;
1605 * struct sample_id sample_id;
1606 * };
1607 * ---
1608 */
1609 PERF_RECORD_EXIT = 4,
1610
1611 /**
1612 * ---
1613 * struct {
1614 * struct perf_event_header header;
1615 * u64 time;
1616 * u64 id;
1617 * u64 stream_id;
1618 * struct sample_id sample_id;
1619 * };
1620 * ---
1621 */
1622 PERF_RECORD_THROTTLE = 5,
1623 PERF_RECORD_UNTHROTTLE = 6,
1624 /**
1625 * ---
1626 * struct {
1627 * struct perf_event_header header;
1628 * u32 pid, ppid;
1629 * u32 tid, ptid;
1630 * u64 time;
1631 * struct sample_id sample_id;
1632 * };
1633 * ---
1634 */
1635 PERF_RECORD_FORK = 7,
1636 /**
1637 * ---
1638 * struct {
1639 * struct perf_event_header header;
1640 * u32 pid, tid;
1641 *
1642 * struct read_format values;
1643 * struct sample_id sample_id;
1644 * };
1645 * ---
1646 */
1647 PERF_RECORD_READ = 8,
1648 /**
1649 * ---
1650 * struct {
1651 * struct perf_event_header header;
1652 *
1653 * #
1654 * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
1655 * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
1656 * # is fixed relative to header.
1657 * #
1658 *
1659 * { u64 id; } && PERF_SAMPLE_IDENTIFIER
1660 * { u64 ip; } && PERF_SAMPLE_IP
1661 * { u32 pid, tid; } && PERF_SAMPLE_TID
1662 * { u64 time; } && PERF_SAMPLE_TIME
1663 * { u64 addr; } && PERF_SAMPLE_ADDR
1664 * { u64 id; } && PERF_SAMPLE_ID
1665 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
1666 * { u32 cpu, res; } && PERF_SAMPLE_CPU
1667 * { u64 period; } && PERF_SAMPLE_PERIOD
1668 *
1669 * { struct read_format values; } && PERF_SAMPLE_READ
1670 *
1671 * { u64 nr,
1672 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
1673 *
1674 * #
1675 * # The RAW record below is opaque data wrt the ABI
1676 * #
1677      *     # That is, the ABI doesn't make any promises wrt
1678      *     # the stability of its content; it may vary depending
1679 * # on event, hardware, kernel version and phase of
1680 * # the moon.
1681 * #
1682 * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
1683 * #
1684 *
1685 * { u32 size;
1686 * char data[size];}&& PERF_SAMPLE_RAW
1687 *
1688 * { u64 nr;
1689 * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
1690 *
1691 * { u64 abi; # enum perf_sample_regs_abi
1692 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
1693 *
1694 * { u64 size;
1695 * char data[size];
1696 * u64 dyn_size; } && PERF_SAMPLE_STACK_USER
1697 *
1698 * { u64 weight; } && PERF_SAMPLE_WEIGHT
1699 * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
1700 * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
1701 * { u64 abi; # enum perf_sample_regs_abi
1702 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
1703 * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
1704 * };
1705 * ---
1706 */
1707 PERF_RECORD_SAMPLE = 9,
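
    /*
     * The fields above always appear in this order, but only those selected
     * in perf_event_attr.sample_type are present. A sketch of decoding a
     * sample taken with sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
     * PERF_SAMPLE_TIME, where `hdr` is an illustrative pointer to the record:
     * ---
     * auto p = cast(const(ubyte)*)(hdr + 1); // payload follows the header
     * ulong ip   = *cast(const(ulong)*) p;  p += ulong.sizeof;
     * uint  pid  = *cast(const(uint)*)  p;  p += uint.sizeof;
     * uint  tid  = *cast(const(uint)*)  p;  p += uint.sizeof;
     * ulong time = *cast(const(ulong)*) p;  p += ulong.sizeof;
     * ---
     */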
1708
1709 /**
1710      * The MMAP2 records are an augmented version of MMAP; they add
1711      * maj, min, ino numbers to uniquely identify each mapping.
1712      * ---
1713      *
1714 * struct {
1715 * struct perf_event_header header;
1716 *
1717 * u32 pid, tid;
1718 * u64 addr;
1719 * u64 len;
1720 * u64 pgoff;
1721 * u32 maj;
1722 * u32 min;
1723 * u64 ino;
1724 * u64 ino_generation;
1725 * u32 prot, flags;
1726 * char filename[];
1727 * struct sample_id sample_id;
1728 * };
1729 * ---
1730 */
1731 PERF_RECORD_MMAP2 = 10,
1732
1733 /**
1734 * Records that new data landed in the AUX buffer part.
1735 * ---
1736 * struct {
1737 * struct perf_event_header header;
1738 *
1739 * u64 aux_offset;
1740 * u64 aux_size;
1741 * u64 flags;
1742 * struct sample_id sample_id;
1743 * };
1744 * ---
1745 */
1746 PERF_RECORD_AUX = 11,
1747
1748 /**
1749      * Indicates that an instruction trace has started.
1750      * ---
1751      *
1752 * struct {
1753 * struct perf_event_header header;
1754 * u32 pid;
1755 * u32 tid;
1756 * };
1757 * ---
1758 */
1759 PERF_RECORD_ITRACE_START = 12,
1760
1761 /**
1762 * Records the dropped/lost sample number.
1763 * ---
1764 * struct {
1765 * struct perf_event_header header;
1766 *
1767 * u64 lost;
1768 * struct sample_id sample_id;
1769 * };
1770 * ---
1771 */
1772 PERF_RECORD_LOST_SAMPLES = 13,
1773
1774 /**
1775 *
1776 * Records a context switch in or out (flagged by
1777 * PERF_RECORD_MISC_SWITCH_OUT). See also
1778 * PERF_RECORD_SWITCH_CPU_WIDE.
1779 * ---
1780 * struct {
1781 * struct perf_event_header header;
1782 * struct sample_id sample_id;
1783 * };
1784 * ---
1785 */
1786 PERF_RECORD_SWITCH = 14,
1787
1788 /**
1789 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
1790 * next_prev_tid that are the next (switching out) or previous
1791 * (switching in) pid/tid.
1792 * ---
1793 * struct {
1794 * struct perf_event_header header;
1795 * u32 next_prev_pid;
1796 * u32 next_prev_tid;
1797 * struct sample_id sample_id;
1798 * };
1799 * ---
1800 */
1801 PERF_RECORD_SWITCH_CPU_WIDE = 15,
1802
1803 /**
1804 * ---
1805 * struct {
1806 * struct perf_event_header header;
1807 * u32 pid;
1808 * u32 tid;
1809 * u64 nr_namespaces;
1810 * { u64 dev, inode; } [nr_namespaces];
1811 * struct sample_id sample_id;
1812 * };
1813 * ---
1814 */
1815 PERF_RECORD_NAMESPACES = 16,
1816
1817 PERF_RECORD_MAX = 17 /* non-ABI */
1818 }
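
/*
 * A consumer normally switches on perf_event_header.type using the values
 * above, and skips anything it does not understand via header.size. A sketch,
 * with `hdr` an illustrative `const(perf_event_header)*`:
 * ---
 * switch (hdr.type)
 * {
 *     case perf_event_type.PERF_RECORD_SAMPLE:
 *         // decode the sample fields described above
 *         break;
 *     case perf_event_type.PERF_RECORD_MMAP2:
 *         // track the new mapping
 *         break;
 *     default:
 *         break; // unknown record: just advance by hdr.size
 * }
 * ---
 */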
1819 ///
1820 enum PERF_MAX_STACK_DEPTH = 127;
1821 ///
1822 enum PERF_MAX_CONTEXTS_PER_STACK = 8;
1823 ///
1824 enum perf_callchain_context
1825 {
1826 ///
1827 PERF_CONTEXT_HV = cast(ulong)-32,
1828 ///
1829 PERF_CONTEXT_KERNEL = cast(ulong)-128,
1830 ///
1831 PERF_CONTEXT_USER = cast(ulong)-512,
1832 ///
1833 PERF_CONTEXT_GUEST = cast(ulong)-2048,
1834 ///
1835 PERF_CONTEXT_GUEST_KERNEL = cast(ulong)-2176,
1836 ///
1837 PERF_CONTEXT_GUEST_USER = cast(ulong)-2560,
1838 ///
1839 PERF_CONTEXT_MAX = cast(ulong)-4095
1840 }
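
/*
 * In a PERF_SAMPLE_CALLCHAIN ips[] array these sentinels are interleaved with
 * real addresses to mark where the chain switches between user, kernel,
 * hypervisor and guest frames. A sketch of the usual test, treating anything
 * at or above PERF_CONTEXT_MAX (as a ulong) as a marker; `ips` and `nr` are
 * illustrative names for the callchain data taken from a sample:
 * ---
 * foreach (ip; ips[0 .. nr])
 * {
 *     if (ip >= perf_callchain_context.PERF_CONTEXT_MAX)
 *     {
 *         // context marker, e.g. PERF_CONTEXT_KERNEL: the following
 *         // entries are kernel return addresses
 *         continue;
 *     }
 *     // ip is an instruction pointer in the current context
 * }
 * ---
 */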
1841
1842 /**
1843 * PERF_RECORD_AUX::flags bits
1844 */
1845 enum PERF_AUX_FLAG_TRUNCATED = 0x01; /** record was truncated to fit */
1846 enum PERF_AUX_FLAG_OVERWRITE = 0x02; /** snapshot from overwrite mode */
1847 enum PERF_AUX_FLAG_PARTIAL = 0x04; /** record contains gaps */
1848 enum PERF_AUX_FLAG_COLLISION = 0x08; /** sample collided with another */
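
/*
 * These bits arrive in the flags field of a PERF_RECORD_AUX record; a
 * consumer would typically check them before trusting the new AUX data.
 * A sketch, where `auxFlags` is an illustrative ulong read from the record:
 * ---
 * if (auxFlags & PERF_AUX_FLAG_TRUNCATED)
 * {
 *     // the record was cut short; some trace data is missing
 * }
 * ---
 */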
1849 ///
1850 enum PERF_FLAG_FD_NO_GROUP = 1UL << 0;
1851 ///
1852 enum PERF_FLAG_FD_OUTPUT = 1UL << 1;
1853 enum PERF_FLAG_PID_CGROUP = 1UL << 2; /** pid=cgroup id, per-cpu mode only */
1854 enum PERF_FLAG_FD_CLOEXEC = 1UL << 3; /** O_CLOEXEC */
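
/*
 * A minimal sketch of opening a hardware counter for the calling thread with
 * close-on-exec semantics, using the perf_event_open wrapper and the
 * perf_event_attr fields converted earlier in this module (error handling
 * elided; whether this succeeds also depends on
 * /proc/sys/kernel/perf_event_paranoid):
 * ---
 * perf_event_attr attr;
 * attr.type = perf_type_id.PERF_TYPE_HARDWARE;
 * attr.size = cast(uint) perf_event_attr.sizeof;
 * attr.config = perf_hw_id.PERF_COUNT_HW_INSTRUCTIONS;
 *
 * auto fd = perf_event_open(&attr, 0 /+ this thread +/, -1 /+ any cpu +/,
 *                           -1 /+ no group +/, PERF_FLAG_FD_CLOEXEC);
 * ulong count;
 * read(cast(int) fd, &count, count.sizeof);
 * ---
 */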
1855 /// perf_mem_data_src is endian specific.
1856 version (LittleEndian)
1857 {
1858 ///
1859 union perf_mem_data_src
1860 {
1861 ///
1862 ulong val;
1863
1864 struct
1865 {
1866 /* mixin(bitfields!(ulong, "mem_op", 5, ulong, "mem_lvl", 14, ulong,
1867 "mem_snoop", 5, ulong, "mem_lock", 2, ulong, "mem_dtlb", 7, ulong,
1868 "mem_lvl_num", 4, ulong, "mem_remote", 1, ulong,
1869 "mem_snoopx", 2, ulong, "mem_rsvd", 24)); */
1870
1871 private ulong perf_mem_data_src_bitmanip;
1872 ///
1873 @property ulong mem_op() @safe pure nothrow @nogc const
1874 {
1875 auto result = (perf_mem_data_src_bitmanip & 31U) >> 0U;
1876 return cast(ulong) result;
1877 }
1878 ///
1879 @property void mem_op(ulong v) @safe pure nothrow @nogc
1880 {
1881 assert(v >= mem_op_min,
1882 "Value is smaller than the minimum value of bitfield 'mem_op'");
1883 assert(v <= mem_op_max,
1884 "Value is greater than the maximum value of bitfield 'mem_op'");
1885 perf_mem_data_src_bitmanip = cast(
1886 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
1887 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 31U)) | (
1888 (cast(typeof(perf_mem_data_src_bitmanip)) v << 0U) & 31U));
1889 }
1890
1891 enum ulong mem_op_min = cast(ulong) 0U;
1892 enum ulong mem_op_max = cast(ulong) 31U;
1893 ///
1894 @property ulong mem_lvl() @safe pure nothrow @nogc const
1895 {
1896 auto result = (perf_mem_data_src_bitmanip & 524256U) >> 5U;
1897 return cast(ulong) result;
1898 }
1899 ///
1900 @property void mem_lvl(ulong v) @safe pure nothrow @nogc
1901 {
1902 assert(v >= mem_lvl_min,
1903 "Value is smaller than the minimum value of bitfield 'mem_lvl'");
1904 assert(v <= mem_lvl_max,
1905 "Value is greater than the maximum value of bitfield 'mem_lvl'");
1906 perf_mem_data_src_bitmanip = cast(
1907 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
1908 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 524256U)) | (
1909 (cast(typeof(perf_mem_data_src_bitmanip)) v << 5U) & 524256U));
1910 }
1911
1912 enum ulong mem_lvl_min = cast(ulong) 0U;
1913 enum ulong mem_lvl_max = cast(ulong) 16383U;
1914 ///
1915 @property ulong mem_snoop() @safe pure nothrow @nogc const
1916 {
1917 auto result = (perf_mem_data_src_bitmanip & 16252928U) >> 19U;
1918 return cast(ulong) result;
1919 }
1920 ///
1921 @property void mem_snoop(ulong v) @safe pure nothrow @nogc
1922 {
1923 assert(v >= mem_snoop_min,
1924 "Value is smaller than the minimum value of bitfield 'mem_snoop'");
1925 assert(v <= mem_snoop_max,
1926 "Value is greater than the maximum value of bitfield 'mem_snoop'");
1927 perf_mem_data_src_bitmanip = cast(
1928 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
1929 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 16252928U)) | (
1930 (cast(typeof(perf_mem_data_src_bitmanip)) v << 19U) & 16252928U));
1931 }
1932
1933 enum ulong mem_snoop_min = cast(ulong) 0U;
1934 enum ulong mem_snoop_max = cast(ulong) 31U;
1935 ///
1936 @property ulong mem_lock() @safe pure nothrow @nogc const
1937 {
1938 auto result = (perf_mem_data_src_bitmanip & 50331648U) >> 24U;
1939 return cast(ulong) result;
1940 }
1941 ///
1942 @property void mem_lock(ulong v) @safe pure nothrow @nogc
1943 {
1944 assert(v >= mem_lock_min,
1945 "Value is smaller than the minimum value of bitfield 'mem_lock'");
1946 assert(v <= mem_lock_max,
1947 "Value is greater than the maximum value of bitfield 'mem_lock'");
1948 perf_mem_data_src_bitmanip = cast(
1949 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
1950 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 50331648U)) | (
1951 (cast(typeof(perf_mem_data_src_bitmanip)) v << 24U) & 50331648U));
1952 }
1953
1954 enum ulong mem_lock_min = cast(ulong) 0U;
1955 enum ulong mem_lock_max = cast(ulong) 3U;
1956 ///
1957 @property ulong mem_dtlb() @safe pure nothrow @nogc const
1958 {
1959 auto result = (perf_mem_data_src_bitmanip & 8522825728UL) >> 26U;
1960 return cast(ulong) result;
1961 }
1962 ///
1963 @property void mem_dtlb(ulong v) @safe pure nothrow @nogc
1964 {
1965 assert(v >= mem_dtlb_min,
1966 "Value is smaller than the minimum value of bitfield 'mem_dtlb'");
1967 assert(v <= mem_dtlb_max,
1968 "Value is greater than the maximum value of bitfield 'mem_dtlb'");
1969 perf_mem_data_src_bitmanip = cast(
1970 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
1971 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 8522825728UL)) | (
1972 (cast(typeof(perf_mem_data_src_bitmanip)) v << 26U) & 8522825728UL));
1973 }
1974
1975 enum ulong mem_dtlb_min = cast(ulong) 0U;
1976 enum ulong mem_dtlb_max = cast(ulong) 127U;
1977 ///
1978 @property ulong mem_lvl_num() @safe pure nothrow @nogc const
1979 {
1980 auto result = (perf_mem_data_src_bitmanip & 128849018880UL) >> 33U;
1981 return cast(ulong) result;
1982 }
1983 ///
1984 @property void mem_lvl_num(ulong v) @safe pure nothrow @nogc
1985 {
1986 assert(v >= mem_lvl_num_min,
1987 "Value is smaller than the minimum value of bitfield 'mem_lvl_num'");
1988 assert(v <= mem_lvl_num_max,
1989 "Value is greater than the maximum value of bitfield 'mem_lvl_num'");
1990 perf_mem_data_src_bitmanip = cast(
1991 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
1992 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 128849018880UL)) | (
1993 (cast(typeof(perf_mem_data_src_bitmanip)) v << 33U) & 128849018880UL));
1994 }
1995
1996 enum ulong mem_lvl_num_min = cast(ulong) 0U;
1997 enum ulong mem_lvl_num_max = cast(ulong) 15U;
1998 ///
1999 @property ulong mem_remote() @safe pure nothrow @nogc const
2000 {
2001 auto result = (perf_mem_data_src_bitmanip & 137438953472UL) >> 37U;
2002 return cast(ulong) result;
2003 }
2004 ///
2005 @property void mem_remote(ulong v) @safe pure nothrow @nogc
2006 {
2007 assert(v >= mem_remote_min,
2008 "Value is smaller than the minimum value of bitfield 'mem_remote'");
2009 assert(v <= mem_remote_max,
2010 "Value is greater than the maximum value of bitfield 'mem_remote'");
2011 perf_mem_data_src_bitmanip = cast(
2012 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
2013 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 137438953472UL)) | (
2014 (cast(typeof(perf_mem_data_src_bitmanip)) v << 37U) & 137438953472UL));
2015 }
2016
2017 enum ulong mem_remote_min = cast(ulong) 0U;
2018 enum ulong mem_remote_max = cast(ulong) 1U;
2019 ///
2020 @property ulong mem_snoopx() @safe pure nothrow @nogc const
2021 {
2022 auto result = (perf_mem_data_src_bitmanip & 824633720832UL) >> 38U;
2023 return cast(ulong) result;
2024 }
2025 ///
2026 @property void mem_snoopx(ulong v) @safe pure nothrow @nogc
2027 {
2028 assert(v >= mem_snoopx_min,
2029 "Value is smaller than the minimum value of bitfield 'mem_snoopx'");
2030 assert(v <= mem_snoopx_max,
2031 "Value is greater than the maximum value of bitfield 'mem_snoopx'");
2032 perf_mem_data_src_bitmanip = cast(
2033 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
2034 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 824633720832UL)) | (
2035 (cast(typeof(perf_mem_data_src_bitmanip)) v << 38U) & 824633720832UL));
2036 }
2037
2038 enum ulong mem_snoopx_min = cast(ulong) 0U;
2039 enum ulong mem_snoopx_max = cast(ulong) 3U;
2040 ///
2041 @property ulong mem_rsvd() @safe pure nothrow @nogc const
2042 {
2043 auto result = (perf_mem_data_src_bitmanip & 18446742974197923840UL) >> 40U;
2044 return cast(ulong) result;
2045 }
2046 ///
2047 @property void mem_rsvd(ulong v) @safe pure nothrow @nogc
2048 {
2049 assert(v >= mem_rsvd_min,
2050 "Value is smaller than the minimum value of bitfield 'mem_rsvd'");
2051 assert(v <= mem_rsvd_max,
2052 "Value is greater than the maximum value of bitfield 'mem_rsvd'");
2053 perf_mem_data_src_bitmanip = cast(
2054 typeof(perf_mem_data_src_bitmanip))(
2055 (perf_mem_data_src_bitmanip & (-1 - cast(
2056 typeof(perf_mem_data_src_bitmanip)) 18446742974197923840UL)) | (
2057 (cast(typeof(perf_mem_data_src_bitmanip)) v << 40U) & 18446742974197923840UL));
2058 }
2059
2060 enum ulong mem_rsvd_min = cast(ulong) 0U;
2061 enum ulong mem_rsvd_max = cast(ulong) 16777215U;
2062
2063 }
2064 }
2065 }
2066 else
2067 {
2068 ///
2069 union perf_mem_data_src
2070 {
2071 ///
2072 ulong val;
2073
2074 struct
2075 {
2076 /* mixin(bitfields!(ulong, "mem_rsvd", 24, ulong, "mem_snoopx", 2, ulong,
2077 "mem_remote", 1, ulong, "mem_lvl_num", 4, ulong, "mem_dtlb", 7, ulong,
2078 "mem_lock", 2, ulong, "mem_snoop", 5, ulong, "mem_lvl",
2079 14, ulong, "mem_op", 5)); */
2080 private ulong perf_mem_data_src;
2081 ///
2082 @property ulong mem_rsvd() @safe pure nothrow @nogc const
2083 {
2084 auto result = (perf_mem_data_src & 16777215U) >> 0U;
2085 return cast(ulong) result;
2086 }
2087 ///
2088 @property void mem_rsvd(ulong v) @safe pure nothrow @nogc
2089 {
2090 assert(v >= mem_rsvd_min,
2091 "Value is smaller than the minimum value of bitfield 'mem_rsvd'");
2092 assert(v <= mem_rsvd_max,
2093 "Value is greater than the maximum value of bitfield 'mem_rsvd'");
2094 perf_mem_data_src = cast(typeof(perf_mem_data_src))(
2095 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 16777215U)) | (
2096 (cast(typeof(perf_mem_data_src)) v << 0U) & 16777215U));
2097 }
2098
2099 enum ulong mem_rsvd_min = cast(ulong) 0U;
2100 enum ulong mem_rsvd_max = cast(ulong) 16777215U;
2101 ///
2102 @property ulong mem_snoopx() @safe pure nothrow @nogc const
2103 {
2104 auto result = (perf_mem_data_src & 50331648U) >> 24U;
2105 return cast(ulong) result;
2106 }
2107 ///
2108 @property void mem_snoopx(ulong v) @safe pure nothrow @nogc
2109 {
2110 assert(v >= mem_snoopx_min,
2111 "Value is smaller than the minimum value of bitfield 'mem_snoopx'");
2112 assert(v <= mem_snoopx_max,
2113 "Value is greater than the maximum value of bitfield 'mem_snoopx'");
2114 perf_mem_data_src = cast(typeof(perf_mem_data_src))(
2115 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 50331648U)) | (
2116 (cast(typeof(perf_mem_data_src)) v << 24U) & 50331648U));
2117 }
2118
2119 enum ulong mem_snoopx_min = cast(ulong) 0U;
2120 enum ulong mem_snoopx_max = cast(ulong) 3U;
2121 ///
2122 @property ulong mem_remote() @safe pure nothrow @nogc const
2123 {
2124 auto result = (perf_mem_data_src & 67108864U) >> 26U;
2125 return cast(ulong) result;
2126 }
2127 ///
2128 @property void mem_remote(ulong v) @safe pure nothrow @nogc
2129 {
2130 assert(v >= mem_remote_min,
2131 "Value is smaller than the minimum value of bitfield 'mem_remote'");
2132 assert(v <= mem_remote_max,
2133 "Value is greater than the maximum value of bitfield 'mem_remote'");
2134 perf_mem_data_src = cast(typeof(perf_mem_data_src))(
2135 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 67108864U)) | (
2136 (cast(typeof(perf_mem_data_src)) v << 26U) & 67108864U));
2137 }
2138
2139 enum ulong mem_remote_min = cast(ulong) 0U;
2140 enum ulong mem_remote_max = cast(ulong) 1U;
2141 ///
2142 @property ulong mem_lvl_num() @safe pure nothrow @nogc const
2143 {
2144 auto result = (perf_mem_data_src & 2013265920U) >> 27U;
2145 return cast(ulong) result;
2146 }
2147 ///
2148 @property void mem_lvl_num(ulong v) @safe pure nothrow @nogc
2149 {
2150 assert(v >= mem_lvl_num_min,
2151 "Value is smaller than the minimum value of bitfield 'mem_lvl_num'");
2152 assert(v <= mem_lvl_num_max,
2153 "Value is greater than the maximum value of bitfield 'mem_lvl_num'");
2154 perf_mem_data_src = cast(typeof(perf_mem_data_src))(
2155 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 2013265920U)) | (
2156 (cast(typeof(perf_mem_data_src)) v << 27U) & 2013265920U));
2157 }
2158
2159 enum ulong mem_lvl_num_min = cast(ulong) 0U;
2160 enum ulong mem_lvl_num_max = cast(ulong) 15U;
2161 ///
2162 @property ulong mem_dtlb() @safe pure nothrow @nogc const
2163 {
2164 auto result = (perf_mem_data_src & 272730423296UL) >> 31U;
2165 return cast(ulong) result;
2166 }
2167 ///
2168 @property void mem_dtlb(ulong v) @safe pure nothrow @nogc
2169 {
2170 assert(v >= mem_dtlb_min,
2171 "Value is smaller than the minimum value of bitfield 'mem_dtlb'");
2172 assert(v <= mem_dtlb_max,
2173 "Value is greater than the maximum value of bitfield 'mem_dtlb'");
2174 perf_mem_data_src = cast(typeof(perf_mem_data_src))(
2175 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 272730423296UL)) | (
2176 (cast(typeof(perf_mem_data_src)) v << 31U) & 272730423296UL));
2177 }
2178
2179 enum ulong mem_dtlb_min = cast(ulong) 0U;
2180 enum ulong mem_dtlb_max = cast(ulong) 127U;
2181 ///
2182 @property ulong mem_lock() @safe pure nothrow @nogc const
2183 {
2184 auto result = (perf_mem_data_src & 824633720832UL) >> 38U;
2185 return cast(ulong) result;
2186 }
2187 ///
2188 @property void mem_lock(ulong v) @safe pure nothrow @nogc
2189 {
2190 assert(v >= mem_lock_min,
2191 "Value is smaller than the minimum value of bitfield 'mem_lock'");
2192 assert(v <= mem_lock_max,
2193 "Value is greater than the maximum value of bitfield 'mem_lock'");
2194 perf_mem_data_src = cast(typeof(perf_mem_data_src))(
2195 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 824633720832UL)) | (
2196 (cast(typeof(perf_mem_data_src)) v << 38U) & 824633720832UL));
2197 }
2198
2199 enum ulong mem_lock_min = cast(ulong) 0U;
2200 enum ulong mem_lock_max = cast(ulong) 3U;
2201 ///
2202 @property ulong mem_snoop() @safe pure nothrow @nogc const
2203 {
2204 auto result = (perf_mem_data_src & 34084860461056UL) >> 40U;
2205 return cast(ulong) result;
2206 }
2207 ///
2208 @property void mem_snoop(ulong v) @safe pure nothrow @nogc
2209 {
2210 assert(v >= mem_snoop_min,
2211 "Value is smaller than the minimum value of bitfield 'mem_snoop'");
2212 assert(v <= mem_snoop_max,
2213 "Value is greater than the maximum value of bitfield 'mem_snoop'");
2214 perf_mem_data_src = cast(typeof(perf_mem_data_src))(
2215 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 34084860461056UL)) | (
2216 (cast(typeof(perf_mem_data_src)) v << 40U) & 34084860461056UL));
2217 }
2218
2219 enum ulong mem_snoop_min = cast(ulong) 0U;
2220 enum ulong mem_snoop_max = cast(ulong) 31U;
2221 ///
2222 @property ulong mem_lvl() @safe pure nothrow @nogc const
2223 {
2224 auto result = (perf_mem_data_src & 576425567931334656UL) >> 45U;
2225 return cast(ulong) result;
2226 }
2227 ///
2228 @property void mem_lvl(ulong v) @safe pure nothrow @nogc
2229 {
2230 assert(v >= mem_lvl_min,
2231 "Value is smaller than the minimum value of bitfield 'mem_lvl'");
2232 assert(v <= mem_lvl_max,
2233 "Value is greater than the maximum value of bitfield 'mem_lvl'");
2234 perf_mem_data_src = cast(typeof(perf_mem_data_src))((perf_mem_data_src & (
2235 -1 - cast(typeof(perf_mem_data_src)) 576425567931334656UL)) | (
2236 (cast(typeof(perf_mem_data_src)) v << 45U) & 576425567931334656UL));
2237 }
2238
2239 enum ulong mem_lvl_min = cast(ulong) 0U;
2240 enum ulong mem_lvl_max = cast(ulong) 16383U;
2241 ///
2242 @property ulong mem_op() @safe pure nothrow @nogc const
2243 {
2244 auto result = (perf_mem_data_src & 17870283321406128128UL) >> 59U;
2245 return cast(ulong) result;
2246 }
2247 ///
2248 @property void mem_op(ulong v) @safe pure nothrow @nogc
2249 {
2250 assert(v >= mem_op_min,
2251 "Value is smaller than the minimum value of bitfield 'mem_op'");
2252 assert(v <= mem_op_max,
2253 "Value is greater than the maximum value of bitfield 'mem_op'");
2254 perf_mem_data_src = cast(typeof(perf_mem_data_src))((perf_mem_data_src & (
2255 -1 - cast(typeof(perf_mem_data_src)) 17870283321406128128UL)) | (
2256 (cast(typeof(perf_mem_data_src)) v << 59U) & 17870283321406128128UL));
2257 }
2258
2259 enum ulong mem_op_min = cast(ulong) 0U;
2260 enum ulong mem_op_max = cast(ulong) 31U;
2261 }
2262 }
2263 }
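
/*
 * A PERF_SAMPLE_DATA_SRC value taken from a sample can be dropped into this
 * union and inspected through the properties above; the meaning of the
 * individual bits is given by the PERF_MEM_* constants declared below.
 * A sketch, where `raw` is an illustrative ulong from the sample:
 * ---
 * perf_mem_data_src src;
 * src.val = raw;
 * if (src.mem_op & PERF_MEM_OP_LOAD)
 * {
 *     const bool l1Hit = (src.mem_lvl & PERF_MEM_LVL_L1) != 0 &&
 *                        (src.mem_lvl & PERF_MEM_LVL_HIT) != 0;
 * }
 * ---
 */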
2264
2274 /** type of opcode (load/store/prefetch, code) */
2275 enum PERF_MEM_OP_NA = 0x01; /** not available */
2276 enum PERF_MEM_OP_LOAD = 0x02; /** load instruction */
2277 enum PERF_MEM_OP_STORE = 0x04; /** store instruction */
2278 enum PERF_MEM_OP_PFETCH = 0x08; /** prefetch */
2279 enum PERF_MEM_OP_EXEC = 0x10; /** code (execution) */
2280 enum PERF_MEM_OP_SHIFT = 0;
2281
2282 /* memory hierarchy (memory level, hit or miss) */
2283 enum PERF_MEM_LVL_NA = 0x01; /** not available */
2284 enum PERF_MEM_LVL_HIT = 0x02; /** hit level */
2285 enum PERF_MEM_LVL_MISS = 0x04; /** miss level */
2286 enum PERF_MEM_LVL_L1 = 0x08; /** L1 */
2287 enum PERF_MEM_LVL_LFB = 0x10; /** Line Fill Buffer */
2288 enum PERF_MEM_LVL_L2 = 0x20; /** L2 */
2289 enum PERF_MEM_LVL_L3 = 0x40; /** L3 */
2290 enum PERF_MEM_LVL_LOC_RAM = 0x80; /** Local DRAM */
2291 enum PERF_MEM_LVL_REM_RAM1 = 0x100; /** Remote DRAM (1 hop) */
2292 enum PERF_MEM_LVL_REM_RAM2 = 0x200; /** Remote DRAM (2 hops) */
2293 enum PERF_MEM_LVL_REM_CCE1 = 0x400; /** Remote Cache (1 hop) */
2294 enum PERF_MEM_LVL_REM_CCE2 = 0x800; /** Remote Cache (2 hops) */
2295 enum PERF_MEM_LVL_IO = 0x1000; /** I/O memory */
2296 enum PERF_MEM_LVL_UNC = 0x2000; /** Uncached memory */
2297 ///
2298 enum PERF_MEM_LVL_SHIFT = 5;
2299
2300 enum PERF_MEM_REMOTE_REMOTE = 0x01; /** Remote */
2301 ///
2302 enum PERF_MEM_REMOTE_SHIFT = 37;
2303
2304 enum PERF_MEM_LVLNUM_L1 = 0x01; /** L1 */
2305 enum PERF_MEM_LVLNUM_L2 = 0x02; /** L2 */
2306 enum PERF_MEM_LVLNUM_L3 = 0x03; /** L3 */
2307 enum PERF_MEM_LVLNUM_L4 = 0x04; /** L4 */
2308 /* 5-0xa available */
2309 enum PERF_MEM_LVLNUM_ANY_CACHE = 0x0b; /** Any cache */
2310 enum PERF_MEM_LVLNUM_LFB = 0x0c; /** LFB */
2311 enum PERF_MEM_LVLNUM_RAM = 0x0d; /** RAM */
2312 enum PERF_MEM_LVLNUM_PMEM = 0x0e; /** PMEM */
2313 enum PERF_MEM_LVLNUM_NA = 0x0f; /** N/A */
2314 ///
2315 enum PERF_MEM_LVLNUM_SHIFT = 33;
2316
2317 /* snoop mode */
2318 enum PERF_MEM_SNOOP_NA = 0x01; /** not available */
2319 enum PERF_MEM_SNOOP_NONE = 0x02; /** no snoop */
2320 enum PERF_MEM_SNOOP_HIT = 0x04; /** snoop hit */
2321 enum PERF_MEM_SNOOP_MISS = 0x08; /** snoop miss */
2322 enum PERF_MEM_SNOOP_HITM = 0x10; /** snoop hit modified */
2323 ///
2324 enum PERF_MEM_SNOOP_SHIFT = 19;
2325
2326 enum PERF_MEM_SNOOPX_FWD = 0x01; /** forward */
2327 /* 1 free */
2328 enum PERF_MEM_SNOOPX_SHIFT = 38;
2329
2330 /** locked instruction */
2331 enum PERF_MEM_LOCK_NA = 0x01; /** not available */
2332 enum PERF_MEM_LOCK_LOCKED = 0x02; /** locked transaction */
2333 ///
2334 enum PERF_MEM_LOCK_SHIFT = 24;
2335
2336 /* TLB access */
2337 enum PERF_MEM_TLB_NA = 0x01; /** not available */
2338 enum PERF_MEM_TLB_HIT = 0x02; /** hit level */
2339 enum PERF_MEM_TLB_MISS = 0x04; /** miss level */
2340 enum PERF_MEM_TLB_L1 = 0x08; /** L1 */
2341 enum PERF_MEM_TLB_L2 = 0x10; /** L2 */
2342 enum PERF_MEM_TLB_WK = 0x20; /** Hardware Walker */
2343 enum PERF_MEM_TLB_OS = 0x40; /** OS fault handler */
2344 ///
2345 enum PERF_MEM_TLB_SHIFT = 26;
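
/*
 * Each of the groups above occupies its own bit range within the 64-bit
 * data_src word; the *_SHIFT constants place a group's flags at the right
 * position. For instance, a local L1 load hit with a D-TLB hit would be
 * encoded roughly as:
 * ---
 * ulong dataSrc =
 *       (cast(ulong) PERF_MEM_OP_LOAD << PERF_MEM_OP_SHIFT)
 *     | (cast(ulong)(PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT) << PERF_MEM_LVL_SHIFT)
 *     | (cast(ulong)(PERF_MEM_TLB_L1 | PERF_MEM_TLB_HIT) << PERF_MEM_TLB_SHIFT);
 * ---
 */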
2346
2347 /**
2348 * single taken branch record layout:
2349 *
2350 * from: source instruction (may not always be a branch insn)
2351 * to: branch target
2352 * mispred: branch target was mispredicted
2353 * predicted: branch target was predicted
2354 *
2355  * Support for mispred and predicted is optional; if not
2356  * supported, mispred = predicted = 0.
2357 *
2358 * in_tx: running in a hardware transaction
2359 * abort: aborting a hardware transaction
2360 * cycles: cycles from last branch (or 0 if not supported)
2361 * type: branch type
2362 */
2363 struct perf_branch_entry
2364 {
2365 ///
2366 ulong from;
2367 ///
2368 ulong to;
2369
2370 /* mixin(bitfields!(ulong, "mispred", 1, ulong, "predicted", 1, ulong,
2371 "in_tx", 1, ulong, "abort", 1, ulong, "cycles", 16, ulong, "type",
2372 4, ulong, "reserved", 40)); */
2373 private ulong perf_branch_entry_bitmanip;
2374 ///
2375 @property ulong mispred() @safe pure nothrow @nogc const
2376 {
2377 auto result = (perf_branch_entry_bitmanip & 1U) >> 0U;
2378 return cast(ulong) result;
2379 }
2380 ///
2381 @property void mispred(ulong v) @safe pure nothrow @nogc
2382 {
2383 assert(v >= mispred_min,
2384 "Value is smaller than the minimum value of bitfield 'mispred'");
2385 assert(v <= mispred_max,
2386 "Value is greater than the maximum value of bitfield 'mispred'");
2387 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
2388 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 1U)) | (
2389 (cast(typeof(perf_branch_entry_bitmanip)) v << 0U) & 1U));
2390 }
2391
2392 enum ulong mispred_min = cast(ulong) 0U;
2393 enum ulong mispred_max = cast(ulong) 1U;
2394 ///
2395 @property ulong predicted() @safe pure nothrow @nogc const
2396 {
2397 auto result = (perf_branch_entry_bitmanip & 2U) >> 1U;
2398 return cast(ulong) result;
2399 }
2400 ///
2401 @property void predicted(ulong v) @safe pure nothrow @nogc
2402 {
2403 assert(v >= predicted_min,
2404 "Value is smaller than the minimum value of bitfield 'predicted'");
2405 assert(v <= predicted_max,
2406 "Value is greater than the maximum value of bitfield 'predicted'");
2407 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
2408 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 2U)) | (
2409 (cast(typeof(perf_branch_entry_bitmanip)) v << 1U) & 2U));
2410 }
2411
2412 enum ulong predicted_min = cast(ulong) 0U;
2413 enum ulong predicted_max = cast(ulong) 1U;
2414 ///
2415 @property ulong in_tx() @safe pure nothrow @nogc const
2416 {
2417 auto result = (perf_branch_entry_bitmanip & 4U) >> 2U;
2418 return cast(ulong) result;
2419 }
2420 ///
2421 @property void in_tx(ulong v) @safe pure nothrow @nogc
2422 {
2423 assert(v >= in_tx_min,
2424 "Value is smaller than the minimum value of bitfield 'in_tx'");
2425 assert(v <= in_tx_max,
2426 "Value is greater than the maximum value of bitfield 'in_tx'");
2427 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
2428 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 4U)) | (
2429 (cast(typeof(perf_branch_entry_bitmanip)) v << 2U) & 4U));
2430 }
2431
2432 enum ulong in_tx_min = cast(ulong) 0U;
2433 enum ulong in_tx_max = cast(ulong) 1U;
2434 ///
2435 @property ulong abort() @safe pure nothrow @nogc const
2436 {
2437 auto result = (perf_branch_entry_bitmanip & 8U) >> 3U;
2438 return cast(ulong) result;
2439 }
2440 ///
2441 @property void abort(ulong v) @safe pure nothrow @nogc
2442 {
2443 assert(v >= abort_min,
2444 "Value is smaller than the minimum value of bitfield 'abort'");
2445 assert(v <= abort_max,
2446 "Value is greater than the maximum value of bitfield 'abort'");
2447 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
2448 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 8U)) | (
2449 (cast(typeof(perf_branch_entry_bitmanip)) v << 3U) & 8U));
2450 }
2451
2452 enum ulong abort_min = cast(ulong) 0U;
2453 enum ulong abort_max = cast(ulong) 1U;
2454 ///
2455 @property ulong cycles() @safe pure nothrow @nogc const
2456 {
2457 auto result = (perf_branch_entry_bitmanip & 1048560U) >> 4U;
2458 return cast(ulong) result;
2459 }
2460 ///
2461 @property void cycles(ulong v) @safe pure nothrow @nogc
2462 {
2463 assert(v >= cycles_min,
2464 "Value is smaller than the minimum value of bitfield 'cycles'");
2465 assert(v <= cycles_max,
2466 "Value is greater than the maximum value of bitfield 'cycles'");
2467 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
2468 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 1048560U)) | (
2469 (cast(typeof(perf_branch_entry_bitmanip)) v << 4U) & 1048560U));
2470 }
2471
2472 enum ulong cycles_min = cast(ulong) 0U;
2473 enum ulong cycles_max = cast(ulong) 65535U;
2474 ///
2475 @property ulong type() @safe pure nothrow @nogc const
2476 {
2477 auto result = (perf_branch_entry_bitmanip & 15728640U) >> 20U;
2478 return cast(ulong) result;
2479 }
2480 ///
2481 @property void type(ulong v) @safe pure nothrow @nogc
2482 {
2483 assert(v >= type_min, "Value is smaller than the minimum value of bitfield 'type'");
2484 assert(v <= type_max, "Value is greater than the maximum value of bitfield 'type'");
2485 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
2486 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 15728640U)) | (
2487 (cast(typeof(perf_branch_entry_bitmanip)) v << 20U) & 15728640U));
2488 }
2489
2490 enum ulong type_min = cast(ulong) 0U;
2491 enum ulong type_max = cast(ulong) 15U;
2492 ///
2493 @property ulong reserved() @safe pure nothrow @nogc const
2494 {
2495 auto result = (perf_branch_entry_bitmanip & 18446744073692774400UL) >> 24U;
2496 return cast(ulong) result;
2497 }
2498 ///
2499 @property void reserved(ulong v) @safe pure nothrow @nogc
2500 {
2501 assert(v >= reserved_min,
2502 "Value is smaller than the minimum value of bitfield 'reserved'");
2503 assert(v <= reserved_max,
2504 "Value is greater than the maximum value of bitfield 'reserved'");
2505 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))(
2506 (perf_branch_entry_bitmanip & (-1 - cast(
2507 typeof(perf_branch_entry_bitmanip)) 18446744073692774400UL)) | (
2508 (cast(typeof(perf_branch_entry_bitmanip)) v << 24U) & 18446744073692774400UL));
2509 }
2510
2511 enum ulong reserved_min = cast(ulong) 0U;
2512 enum ulong reserved_max = cast(ulong) 1099511627775UL;
2513 }
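
/*
 * With PERF_SAMPLE_BRANCH_STACK selected, a sample carries `u64 nr` followed
 * by `nr` of these entries (see the PERF_RECORD_SAMPLE layout above). A
 * sketch of walking them, assuming `p` is an illustrative `const(ubyte)*`
 * pointing at the `nr` field inside the sample:
 * ---
 * ulong nr = *cast(const(ulong)*) p;
 * auto entries = cast(const(perf_branch_entry)*)(p + ulong.sizeof);
 * foreach (i; 0 .. nr)
 * {
 *     const e = entries[i];
 *     // e.from, e.to, e.mispred, e.cycles, e.type ...
 * }
 * ---
 */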
2514