1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * This file contains preset event names from the Performance Application 28 * Programming Interface v3.5 which included the following notice: 29 * 30 * Copyright (c) 2005,6 31 * Innovative Computing Labs 32 * Computer Science Department, 33 * University of Tennessee, 34 * Knoxville, TN. 35 * All Rights Reserved. 36 * 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions are met: 40 * 41 * * Redistributions of source code must retain the above copyright notice, 42 * this list of conditions and the following disclaimer. 43 * * Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * * Neither the name of the University of Tennessee nor the names of its 47 * contributors may be used to endorse or promote products derived from 48 * this software without specific prior written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 51 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 54 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 55 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 56 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 57 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 58 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 59 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 60 * POSSIBILITY OF SUCH DAMAGE. 61 * 62 * 63 * This open source software license conforms to the BSD License template. 64 */ 65 66 67 /* 68 * Performance Counter Back-End for Intel processors supporting Architectural 69 * Performance Monitoring. 70 */ 71 72 #include <sys/cpuvar.h> 73 #include <sys/param.h> 74 #include <sys/cpc_impl.h> 75 #include <sys/cpc_pcbe.h> 76 #include <sys/modctl.h> 77 #include <sys/inttypes.h> 78 #include <sys/systm.h> 79 #include <sys/cmn_err.h> 80 #include <sys/x86_archext.h> 81 #include <sys/sdt.h> 82 #include <sys/archsystm.h> 83 #include <sys/privregs.h> 84 #include <sys/ddi.h> 85 #include <sys/sunddi.h> 86 #include <sys/cred.h> 87 #include <sys/policy.h> 88 89 static int core_pcbe_init(void); 90 static uint_t core_pcbe_ncounters(void); 91 static const char *core_pcbe_impl_name(void); 92 static const char *core_pcbe_cpuref(void); 93 static char *core_pcbe_list_events(uint_t picnum); 94 static char *core_pcbe_list_attrs(void); 95 static uint64_t core_pcbe_event_coverage(char *event); 96 static uint64_t core_pcbe_overflow_bitmap(void); 97 static int core_pcbe_configure(uint_t picnum, char *event, uint64_t preset, 98 uint32_t flags, uint_t nattrs, kcpc_attr_t *attrs, void **data, 99 void *token); 100 static void core_pcbe_program(void *token); 101 static void core_pcbe_allstop(void); 102 static void core_pcbe_sample(void *token); 103 static void core_pcbe_free(void *config); 104 105 #define FALSE 0 106 #define TRUE 1 107 108 /* Counter Type */ 109 #define CORE_GPC 0 /* General-Purpose Counter (GPC) */ 110 #define CORE_FFC 1 /* Fixed-Function Counter (FFC) */ 111 112 /* MSR Addresses */ 113 #define GPC_BASE_PMC 0x00c1 /* First GPC */ 114 #define GPC_BASE_PES 0x0186 /* First GPC Event Select register */ 115 #define FFC_BASE_PMC 0x0309 /* First FFC */ 116 #define PERF_FIXED_CTR_CTRL 0x038d /* Used to enable/disable FFCs */ 117 #define PERF_GLOBAL_STATUS 0x038e /* Overflow status register */ 118 #define PERF_GLOBAL_CTRL 0x038f /* Used to enable/disable counting */ 119 #define PERF_GLOBAL_OVF_CTRL 0x0390 /* Used to clear overflow status */ 120 121 /* 122 * Processor Event Select register fields 123 */ 124 #define CORE_USR (1ULL << 16) /* Count while not in ring 0 */ 125 #define CORE_OS (1ULL << 17) /* Count while in ring 0 */ 126 #define CORE_EDGE (1ULL << 18) /* Enable edge detection */ 127 #define CORE_PC (1ULL << 19) /* Enable pin control */ 128 #define CORE_INT (1ULL << 20) /* Enable interrupt on overflow */ 129 #define CORE_EN (1ULL << 22) /* Enable counting */ 130 #define CORE_INV (1ULL << 23) /* Invert the CMASK */ 131 #define CORE_ANYTHR (1ULL << 21) /* Count event for any thread on core */ 132 133 #define CORE_UMASK_SHIFT 8 134 #define CORE_UMASK_MASK 0xffu 135 #define CORE_CMASK_SHIFT 24 136 #define CORE_CMASK_MASK 0xffu 137 138 /* 139 * Fixed-function counter attributes 140 */ 141 #define CORE_FFC_OS_EN (1ULL << 0) /* Count while not in ring 0 */ 142 #define CORE_FFC_USR_EN (1ULL << 1) /* Count while in ring 1 */ 143 #define CORE_FFC_ANYTHR (1ULL << 2) /* Count event for any thread on core */ 144 #define CORE_FFC_PMI (1ULL << 3) /* Enable interrupt on overflow */ 145 146 /* 147 * Number of bits for specifying each FFC's attributes in the control register 148 */ 149 #define CORE_FFC_ATTR_SIZE 4 150 151 /* 152 * CondChgd and OvfBuffer fields of global status and overflow control registers 153 */ 154 #define CONDCHGD (1ULL << 63) 155 #define OVFBUFFER (1ULL << 62) 156 #define MASK_CONDCHGD_OVFBUFFER (CONDCHGD | OVFBUFFER) 157 158 #define ALL_STOPPED 0ULL 159 160 #define BITMASK_XBITS(x) ((1ull << (x)) - 1ull) 161 162 /* 163 * Only the lower 32-bits can be written to in the general-purpose 164 * counters. The higher bits are extended from bit 31; all ones if 165 * bit 31 is one and all zeros otherwise. 166 * 167 * The fixed-function counters do not have this restriction. 168 */ 169 #define BITS_EXTENDED_FROM_31 (BITMASK_XBITS(width_gpc) & ~BITMASK_XBITS(31)) 170 171 #define WRMSR(msr, value) \ 172 wrmsr((msr), (value)); \ 173 DTRACE_PROBE2(wrmsr, uint64_t, (msr), uint64_t, (value)); 174 175 #define RDMSR(msr, value) \ 176 (value) = rdmsr((msr)); \ 177 DTRACE_PROBE2(rdmsr, uint64_t, (msr), uint64_t, (value)); 178 179 typedef struct core_pcbe_config { 180 uint64_t core_rawpic; 181 uint64_t core_ctl; /* Event Select bits */ 182 uint64_t core_pmc; /* Counter register address */ 183 uint64_t core_pes; /* Event Select register address */ 184 uint_t core_picno; 185 uint8_t core_pictype; /* CORE_GPC or CORE_FFC */ 186 } core_pcbe_config_t; 187 188 pcbe_ops_t core_pcbe_ops = { 189 PCBE_VER_1, /* pcbe_ver */ 190 CPC_CAP_OVERFLOW_INTERRUPT | CPC_CAP_OVERFLOW_PRECISE, /* pcbe_caps */ 191 core_pcbe_ncounters, /* pcbe_ncounters */ 192 core_pcbe_impl_name, /* pcbe_impl_name */ 193 core_pcbe_cpuref, /* pcbe_cpuref */ 194 core_pcbe_list_events, /* pcbe_list_events */ 195 core_pcbe_list_attrs, /* pcbe_list_attrs */ 196 core_pcbe_event_coverage, /* pcbe_event_coverage */ 197 core_pcbe_overflow_bitmap, /* pcbe_overflow_bitmap */ 198 core_pcbe_configure, /* pcbe_configure */ 199 core_pcbe_program, /* pcbe_program */ 200 core_pcbe_allstop, /* pcbe_allstop */ 201 core_pcbe_sample, /* pcbe_sample */ 202 core_pcbe_free /* pcbe_free */ 203 }; 204 205 struct nametable_core_uarch { 206 const char *name; 207 uint64_t restricted_bits; 208 uint8_t event_num; 209 }; 210 211 #define NT_END 0xFF 212 213 /* 214 * Counting an event for all cores or all bus agents requires cpc_cpu privileges 215 */ 216 #define ALL_CORES (1ULL << 15) 217 #define ALL_AGENTS (1ULL << 13) 218 219 struct generic_events { 220 const char *name; 221 uint8_t event_num; 222 uint8_t umask; 223 }; 224 225 static const struct generic_events cmn_generic_events[] = { 226 { "PAPI_tot_cyc", 0x3c, 0x00 }, /* cpu_clk_unhalted.thread_p/core */ 227 { "PAPI_tot_ins", 0xc0, 0x00 }, /* inst_retired.any_p */ 228 { "PAPI_br_ins", 0xc4, 0x0c }, /* br_inst_retired.taken */ 229 { "PAPI_br_msp", 0xc5, 0x00 }, /* br_inst_retired.mispred */ 230 { "PAPI_br_ntk", 0xc4, 0x03 }, 231 /* br_inst_retired.pred_not_taken|pred_taken */ 232 { "PAPI_br_prc", 0xc4, 0x05 }, 233 /* br_inst_retired.pred_not_taken|pred_taken */ 234 { "PAPI_hw_int", 0xc8, 0x00 }, /* hw_int_rvc */ 235 { "PAPI_tot_iis", 0xaa, 0x01 }, /* macro_insts.decoded */ 236 { "PAPI_l1_dca", 0x43, 0x01 }, /* l1d_all_ref */ 237 { "PAPI_l1_icm", 0x81, 0x00 }, /* l1i_misses */ 238 { "PAPI_l1_icr", 0x80, 0x00 }, /* l1i_reads */ 239 { "PAPI_l1_tcw", 0x41, 0x0f }, /* l1d_cache_st.mesi */ 240 { "PAPI_l2_stm", 0x2a, 0x41 }, /* l2_st.self.i_state */ 241 { "PAPI_l2_tca", 0x2e, 0x4f }, /* l2_rqsts.self.demand.mesi */ 242 { "PAPI_l2_tch", 0x2e, 0x4e }, /* l2_rqsts.mes */ 243 { "PAPI_l2_tcm", 0x2e, 0x41 }, /* l2_rqsts.self.demand.i_state */ 244 { "PAPI_l2_tcw", 0x2a, 0x4f }, /* l2_st.self.mesi */ 245 { "PAPI_ld_ins", 0xc0, 0x01 }, /* inst_retired.loads */ 246 { "PAPI_lst_ins", 0xc0, 0x03 }, /* inst_retired.loads|stores */ 247 { "PAPI_sr_ins", 0xc0, 0x02 }, /* inst_retired.stores */ 248 { "PAPI_tlb_dm", 0x08, 0x01 }, /* dtlb_misses.any */ 249 { "PAPI_tlb_im", 0x82, 0x12 }, /* itlb.small_miss|large_miss */ 250 { "PAPI_tlb_tl", 0x0c, 0x03 }, /* page_walks */ 251 { "", NT_END, 0 } 252 }; 253 254 static const struct generic_events generic_events_pic0[] = { 255 { "PAPI_l1_dcm", 0xcb, 0x01 }, /* mem_load_retired.l1d_miss */ 256 { "", NT_END, 0 } 257 }; 258 259 /* 260 * The events listed in the following table can be counted on all 261 * general-purpose counters on processors that are of Penryn and Merom Family 262 */ 263 static const struct nametable_core_uarch cmn_gpc_events_core_uarch[] = { 264 /* Alphabetical order of event name */ 265 266 { "baclears", 0x0, 0xe6 }, 267 { "bogus_br", 0x0, 0xe4 }, 268 { "br_bac_missp_exec", 0x0, 0x8a }, 269 270 { "br_call_exec", 0x0, 0x92 }, 271 { "br_call_missp_exec", 0x0, 0x93 }, 272 { "br_cnd_exec", 0x0, 0x8b }, 273 274 { "br_cnd_missp_exec", 0x0, 0x8c }, 275 { "br_ind_call_exec", 0x0, 0x94 }, 276 { "br_ind_exec", 0x0, 0x8d }, 277 278 { "br_ind_missp_exec", 0x0, 0x8e }, 279 { "br_inst_decoded", 0x0, 0xe0 }, 280 { "br_inst_exec", 0x0, 0x88 }, 281 282 { "br_inst_retired", 0x0, 0xc4 }, 283 { "br_inst_retired_mispred", 0x0, 0xc5 }, 284 { "br_missp_exec", 0x0, 0x89 }, 285 286 { "br_ret_bac_missp_exec", 0x0, 0x91 }, 287 { "br_ret_exec", 0x0, 0x8f }, 288 { "br_ret_missp_exec", 0x0, 0x90 }, 289 290 { "br_tkn_bubble_1", 0x0, 0x97 }, 291 { "br_tkn_bubble_2", 0x0, 0x98 }, 292 { "bus_bnr_drv", ALL_AGENTS, 0x61 }, 293 294 { "bus_data_rcv", ALL_CORES, 0x64 }, 295 { "bus_drdy_clocks", ALL_AGENTS, 0x62 }, 296 { "bus_hit_drv", ALL_AGENTS, 0x7a }, 297 298 { "bus_hitm_drv", ALL_AGENTS, 0x7b }, 299 { "bus_io_wait", ALL_CORES, 0x7f }, 300 { "bus_lock_clocks", ALL_CORES | ALL_AGENTS, 0x63 }, 301 302 { "bus_request_outstanding", ALL_CORES | ALL_AGENTS, 0x60 }, 303 { "bus_trans_any", ALL_CORES | ALL_AGENTS, 0x70 }, 304 { "bus_trans_brd", ALL_CORES | ALL_AGENTS, 0x65 }, 305 306 { "bus_trans_burst", ALL_CORES | ALL_AGENTS, 0x6e }, 307 { "bus_trans_def", ALL_CORES | ALL_AGENTS, 0x6d }, 308 { "bus_trans_ifetch", ALL_CORES | ALL_AGENTS, 0x68 }, 309 310 { "bus_trans_inval", ALL_CORES | ALL_AGENTS, 0x69 }, 311 { "bus_trans_io", ALL_CORES | ALL_AGENTS, 0x6c }, 312 { "bus_trans_mem", ALL_CORES | ALL_AGENTS, 0x6f }, 313 314 { "bus_trans_p", ALL_CORES | ALL_AGENTS, 0x6b }, 315 { "bus_trans_pwr", ALL_CORES | ALL_AGENTS, 0x6a }, 316 { "bus_trans_rfo", ALL_CORES | ALL_AGENTS, 0x66 }, 317 318 { "bus_trans_wb", ALL_CORES | ALL_AGENTS, 0x67 }, 319 { "busq_empty", ALL_CORES, 0x7d }, 320 { "cmp_snoop", ALL_CORES, 0x78 }, 321 322 { "cpu_clk_unhalted", 0x0, 0x3c }, 323 { "cycles_int", 0x0, 0xc6 }, 324 { "cycles_l1i_mem_stalled", 0x0, 0x86 }, 325 326 { "dtlb_misses", 0x0, 0x08 }, 327 { "eist_trans", 0x0, 0x3a }, 328 { "esp", 0x0, 0xab }, 329 330 { "ext_snoop", ALL_AGENTS, 0x77 }, 331 { "fp_mmx_trans", 0x0, 0xcc }, 332 { "hw_int_rcv", 0x0, 0xc8 }, 333 334 { "ild_stall", 0x0, 0x87 }, 335 { "inst_queue", 0x0, 0x83 }, 336 { "inst_retired", 0x0, 0xc0 }, 337 338 { "itlb", 0x0, 0x82 }, 339 { "itlb_miss_retired", 0x0, 0xc9 }, 340 { "l1d_all_ref", 0x0, 0x43 }, 341 342 { "l1d_cache_ld", 0x0, 0x40 }, 343 { "l1d_cache_lock", 0x0, 0x42 }, 344 { "l1d_cache_st", 0x0, 0x41 }, 345 346 { "l1d_m_evict", 0x0, 0x47 }, 347 { "l1d_m_repl", 0x0, 0x46 }, 348 { "l1d_pend_miss", 0x0, 0x48 }, 349 350 { "l1d_prefetch", 0x0, 0x4e }, 351 { "l1d_repl", 0x0, 0x45 }, 352 { "l1d_split", 0x0, 0x49 }, 353 354 { "l1i_misses", 0x0, 0x81 }, 355 { "l1i_reads", 0x0, 0x80 }, 356 { "l2_ads", ALL_CORES, 0x21 }, 357 358 { "l2_dbus_busy_rd", ALL_CORES, 0x23 }, 359 { "l2_ifetch", ALL_CORES, 0x28 }, 360 { "l2_ld", ALL_CORES, 0x29 }, 361 362 { "l2_lines_in", ALL_CORES, 0x24 }, 363 { "l2_lines_out", ALL_CORES, 0x26 }, 364 { "l2_lock", ALL_CORES, 0x2b }, 365 366 { "l2_m_lines_in", ALL_CORES, 0x25 }, 367 { "l2_m_lines_out", ALL_CORES, 0x27 }, 368 { "l2_no_req", ALL_CORES, 0x32 }, 369 370 { "l2_reject_busq", ALL_CORES, 0x30 }, 371 { "l2_rqsts", ALL_CORES, 0x2e }, 372 { "l2_st", ALL_CORES, 0x2a }, 373 374 { "load_block", 0x0, 0x03 }, 375 { "load_hit_pre", 0x0, 0x4c }, 376 { "machine_nukes", 0x0, 0xc3 }, 377 378 { "macro_insts", 0x0, 0xaa }, 379 { "memory_disambiguation", 0x0, 0x09 }, 380 { "misalign_mem_ref", 0x0, 0x05 }, 381 { "page_walks", 0x0, 0x0c }, 382 383 { "pref_rqsts_dn", 0x0, 0xf8 }, 384 { "pref_rqsts_up", 0x0, 0xf0 }, 385 { "rat_stalls", 0x0, 0xd2 }, 386 387 { "resource_stalls", 0x0, 0xdc }, 388 { "rs_uops_dispatched", 0x0, 0xa0 }, 389 { "seg_reg_renames", 0x0, 0xd5 }, 390 391 { "seg_rename_stalls", 0x0, 0xd4 }, 392 { "segment_reg_loads", 0x0, 0x06 }, 393 { "simd_assist", 0x0, 0xcd }, 394 395 { "simd_comp_inst_retired", 0x0, 0xca }, 396 { "simd_inst_retired", 0x0, 0xc7 }, 397 { "simd_instr_retired", 0x0, 0xce }, 398 399 { "simd_sat_instr_retired", 0x0, 0xcf }, 400 { "simd_sat_uop_exec", 0x0, 0xb1 }, 401 { "simd_uop_type_exec", 0x0, 0xb3 }, 402 403 { "simd_uops_exec", 0x0, 0xb0 }, 404 { "snoop_stall_drv", ALL_CORES | ALL_AGENTS, 0x7e }, 405 { "sse_pre_exec", 0x0, 0x07 }, 406 407 { "sse_pre_miss", 0x0, 0x4b }, 408 { "store_block", 0x0, 0x04 }, 409 { "thermal_trip", 0x0, 0x3b }, 410 411 { "uops_retired", 0x0, 0xc2 }, 412 { "x87_ops_retired", 0x0, 0xc1 }, 413 { "", 0x0, NT_END } 414 }; 415 416 /* 417 * If any of the pic specific events require privileges, make sure to add a 418 * check in configure_gpc() to find whether an event hard-coded as a number by 419 * the user has any privilege requirements 420 */ 421 static const struct nametable_core_uarch pic0_events[] = { 422 /* Alphabetical order of event name */ 423 424 { "cycles_div_busy", 0x0, 0x14 }, 425 { "fp_comp_ops_exe", 0x0, 0x10 }, 426 { "idle_during_div", 0x0, 0x18 }, 427 428 { "mem_load_retired", 0x0, 0xcb }, 429 { "rs_uops_dispatched_port", 0x0, 0xa1 }, 430 { "", 0x0, NT_END } 431 }; 432 433 static const struct nametable_core_uarch pic1_events[] = { 434 /* Alphabetical order of event name */ 435 436 { "delayed_bypass", 0x0, 0x19 }, 437 { "div", 0x0, 0x13 }, 438 { "fp_assist", 0x0, 0x11 }, 439 440 { "mul", 0x0, 0x12 }, 441 { "", 0x0, NT_END } 442 }; 443 444 /* FFC entries must be in order */ 445 static char *ffc_names_non_htt[] = { 446 "instr_retired.any", 447 "cpu_clk_unhalted.core", 448 "cpu_clk_unhalted.ref", 449 NULL 450 }; 451 452 static char *ffc_names_htt[] = { 453 "instr_retired.any", 454 "cpu_clk_unhalted.thread", 455 "cpu_clk_unhalted.ref", 456 NULL 457 }; 458 459 static char *ffc_genericnames[] = { 460 "PAPI_tot_ins", 461 "PAPI_tot_cyc", 462 "", 463 NULL 464 }; 465 466 static char **ffc_names = NULL; 467 static char **ffc_allnames = NULL; 468 static char **gpc_names = NULL; 469 static uint32_t versionid; 470 static uint64_t num_gpc; 471 static uint64_t width_gpc; 472 static uint64_t mask_gpc; 473 static uint64_t num_ffc; 474 static uint64_t width_ffc; 475 static uint64_t mask_ffc; 476 static uint_t total_pmc; 477 static uint64_t control_ffc; 478 static uint64_t control_gpc; 479 static uint64_t control_mask; 480 static uint32_t arch_events_vector; 481 482 #define IMPL_NAME_LEN 100 483 static char core_impl_name[IMPL_NAME_LEN]; 484 485 static const char *core_cpuref = 486 "See Appendix A of the \"Intel 64 and IA-32 Architectures Software" \ 487 " Developer's Manual Volume 3B: System Programming Guide, Part 2\"" \ 488 " Order Number: 253669-026US, Februrary 2008"; 489 490 struct events_table_t { 491 uint8_t eventselect; 492 uint8_t unitmask; 493 uint64_t supported_counters; 494 const char *name; 495 }; 496 497 /* Used to describe which counters support an event */ 498 #define C(x) (1 << (x)) 499 #define C0 C(0) 500 #define C1 C(1) 501 #define C2 C(2) 502 #define C3 C(3) 503 #define C_ALL 0xFFFFFFFFFFFFFFFF 504 505 /* Architectural events */ 506 #define ARCH_EVENTS_COMMON \ 507 { 0xc0, 0x00, C_ALL, "inst_retired.any_p" }, \ 508 { 0x3c, 0x01, C_ALL, "cpu_clk_unhalted.ref_p" }, \ 509 { 0x2e, 0x4f, C_ALL, "longest_lat_cache.reference" }, \ 510 { 0x2e, 0x41, C_ALL, "longest_lat_cache.miss" }, \ 511 { 0xc4, 0x00, C_ALL, "br_inst_retired.all_branches" }, \ 512 { 0xc5, 0x00, C_ALL, "br_misp_retired.all_branches" } 513 514 static const struct events_table_t arch_events_table_non_htt[] = { 515 { 0x3c, 0x00, C_ALL, "cpu_clk_unhalted.core" }, 516 ARCH_EVENTS_COMMON 517 }; 518 519 static const struct events_table_t arch_events_table_htt[] = { 520 { 0x3c, 0x00, C_ALL, "cpu_clk_unhalted.thread_p" }, 521 ARCH_EVENTS_COMMON 522 }; 523 524 static char *arch_genevents_table[] = { 525 "PAPI_tot_cyc", /* cpu_clk_unhalted.thread_p/core */ 526 "PAPI_tot_ins", /* inst_retired.any_p */ 527 "", /* cpu_clk_unhalted.ref_p */ 528 "", /* longest_lat_cache.reference */ 529 "", /* longest_lat_cache.miss */ 530 "", /* br_inst_retired.all_branches */ 531 "", /* br_misp_retired.all_branches */ 532 }; 533 534 static const struct events_table_t *arch_events_table = NULL; 535 static uint64_t known_arch_events; 536 static uint64_t known_ffc_num; 537 538 #define GENERICEVENTS_FAM6_MOD26 \ 539 { 0xc4, 0x01, C0|C1|C2|C3, "PAPI_br_cn" }, /* br_inst_retired.conditional */ \ 540 { 0x1d, 0x01, C0|C1|C2|C3, "PAPI_hw_int" }, /* hw_int.rcx */ \ 541 { 0x17, 0x01, C0|C1|C2|C3, "PAPI_tot_iis" }, /* inst_queue_writes */ \ 542 { 0x43, 0x01, C0|C1, "PAPI_l1_dca" }, /* l1d_all_ref.any */ \ 543 { 0x24, 0x03, C0|C1|C2|C3, "PAPI_l1_dcm" }, /* l2_rqsts. loads and rfos */ \ 544 { 0x40, 0x0f, C0|C1|C2|C3, "PAPI_l1_dcr" }, /* l1d_cache_ld.mesi */ \ 545 { 0x41, 0x0f, C0|C1|C2|C3, "PAPI_l1_dcw" }, /* l1d_cache_st.mesi */ \ 546 { 0x80, 0x03, C0|C1|C2|C3, "PAPI_l1_ica" }, /* l1i.reads */ \ 547 { 0x80, 0x01, C0|C1|C2|C3, "PAPI_l1_ich" }, /* l1i.hits */ \ 548 { 0x80, 0x02, C0|C1|C2|C3, "PAPI_l1_icm" }, /* l1i.misses */ \ 549 { 0x80, 0x03, C0|C1|C2|C3, "PAPI_l1_icr" }, /* l1i.reads */ \ 550 { 0x24, 0x33, C0|C1|C2|C3, "PAPI_l1_ldm" }, /* l2_rqsts. loads and ifetches */\ 551 { 0x24, 0xff, C0|C1|C2|C3, "PAPI_l1_tcm" }, /* l2_rqsts.references */ \ 552 { 0x24, 0x02, C0|C1|C2|C3, "PAPI_l2_ldm" }, /* l2_rqsts.ld_miss */ \ 553 { 0x24, 0x08, C0|C1|C2|C3, "PAPI_l2_stm" }, /* l2_rqsts.rfo_miss */ \ 554 { 0x24, 0x3f, C0|C1|C2|C3, "PAPI_l2_tca" }, \ 555 /* l2_rqsts. loads, rfos and ifetches */ \ 556 { 0x24, 0x15, C0|C1|C2|C3, "PAPI_l2_tch" }, \ 557 /* l2_rqsts. ld_hit, rfo_hit and ifetch_hit */ \ 558 { 0x24, 0x2a, C0|C1|C2|C3, "PAPI_l2_tcm" }, \ 559 /* l2_rqsts. ld_miss, rfo_miss and ifetch_miss */ \ 560 { 0x24, 0x33, C0|C1|C2|C3, "PAPI_l2_tcr" }, /* l2_rqsts. loads and ifetches */\ 561 { 0x24, 0x0c, C0|C1|C2|C3, "PAPI_l2_tcw" }, /* l2_rqsts.rfos */ \ 562 { 0x2e, 0x4f, C0|C1|C2|C3, "PAPI_l3_tca" }, /* l3_lat_cache.reference */ \ 563 { 0x2e, 0x41, C0|C1|C2|C3, "PAPI_l3_tcm" }, /* l3_lat_cache.misses */ \ 564 { 0x0b, 0x01, C0|C1|C2|C3, "PAPI_ld_ins" }, /* mem_inst_retired.loads */ \ 565 { 0x0b, 0x03, C0|C1|C2|C3, "PAPI_lst_ins" }, \ 566 /* mem_inst_retired.loads and stores */ \ 567 { 0x26, 0xf0, C0|C1|C2|C3, "PAPI_prf_dm" }, /* l2_data_rqsts.prefetch.mesi */ \ 568 { 0x0b, 0x02, C0|C1|C2|C3, "PAPI_sr_ins" }, /* mem_inst_retired.stores */ \ 569 { 0x49, 0x01, C0|C1|C2|C3, "PAPI_tlb_dm" }, /* dtlb_misses.any */ \ 570 { 0x85, 0x01, C0|C1|C2|C3, "PAPI_tlb_im" } /* itlb_misses.any */ 571 572 573 #define EVENTS_FAM6_MOD26 \ 574 \ 575 { 0x80, 0x04, C0|C1|C2|C3, "l1i.cycles_stalled" }, \ 576 { 0x80, 0x01, C0|C1|C2|C3, "l1i.hits" }, \ 577 { 0x80, 0x02, C0|C1|C2|C3, "l1i.misses" }, \ 578 \ 579 { 0x80, 0x03, C0|C1|C2|C3, "l1i.reads" }, \ 580 { 0x82, 0x01, C0|C1|C2|C3, "large_itlb.hit" }, \ 581 { 0x87, 0x0F, C0|C1|C2|C3, "ild_stall.any" }, \ 582 \ 583 { 0x87, 0x04, C0|C1|C2|C3, "ild_stall.iq_full" }, \ 584 { 0x87, 0x01, C0|C1|C2|C3, "ild_stall.lcp" }, \ 585 { 0x87, 0x02, C0|C1|C2|C3, "ild_stall.mru" }, \ 586 \ 587 { 0x87, 0x08, C0|C1|C2|C3, "ild_stall.regen" }, \ 588 { 0xE6, 0x02, C0|C1|C2|C3, "baclear.bad_target" }, \ 589 { 0xE6, 0x01, C0|C1|C2|C3, "baclear.clear" }, \ 590 \ 591 { 0xE8, 0x01, C0|C1|C2|C3, "bpu_clears.early" }, \ 592 { 0xE8, 0x02, C0|C1|C2|C3, "bpu_clears.late" }, \ 593 { 0xE5, 0x01, C0|C1|C2|C3, "bpu_missed_call_ret" }, \ 594 \ 595 { 0xE0, 0x01, C0|C1|C2|C3, "br_inst_decoded" }, \ 596 { 0x88, 0x7F, C0|C1|C2|C3, "br_inst_exec.any" }, \ 597 { 0x88, 0x01, C0|C1|C2|C3, "br_inst_exec.cond" }, \ 598 \ 599 { 0x88, 0x02, C0|C1|C2|C3, "br_inst_exec.direct" }, \ 600 { 0x88, 0x10, C0|C1|C2|C3, "br_inst_exec.direct_near_call" }, \ 601 { 0x88, 0x20, C0|C1|C2|C3, "br_inst_exec.indirect_near_call" }, \ 602 \ 603 { 0x88, 0x04, C0|C1|C2|C3, "br_inst_exec.indirect_non_call" }, \ 604 { 0x88, 0x30, C0|C1|C2|C3, "br_inst_exec.near_calls" }, \ 605 { 0x88, 0x07, C0|C1|C2|C3, "br_inst_exec.non_calls" }, \ 606 \ 607 { 0x88, 0x08, C0|C1|C2|C3, "br_inst_exec.return_near" }, \ 608 { 0x88, 0x40, C0|C1|C2|C3, "br_inst_exec.taken" }, \ 609 { 0x89, 0x7F, C0|C1|C2|C3, "br_misp_exec.any" }, \ 610 \ 611 { 0x89, 0x01, C0|C1|C2|C3, "br_misp_exec.cond" }, \ 612 { 0x89, 0x02, C0|C1|C2|C3, "br_misp_exec.direct" }, \ 613 { 0x89, 0x10, C0|C1|C2|C3, "br_misp_exec.direct_near_call" }, \ 614 \ 615 { 0x89, 0x20, C0|C1|C2|C3, "br_misp_exec.indirect_near_call" }, \ 616 { 0x89, 0x04, C0|C1|C2|C3, "br_misp_exec.indirect_non_call" }, \ 617 { 0x89, 0x30, C0|C1|C2|C3, "br_misp_exec.near_calls" }, \ 618 \ 619 { 0x89, 0x07, C0|C1|C2|C3, "br_misp_exec.non_calls" }, \ 620 { 0x89, 0x08, C0|C1|C2|C3, "br_misp_exec.return_near" }, \ 621 { 0x89, 0x40, C0|C1|C2|C3, "br_misp_exec.taken" }, \ 622 \ 623 { 0x17, 0x01, C0|C1|C2|C3, "inst_queue_writes" }, \ 624 { 0x1E, 0x01, C0|C1|C2|C3, "inst_queue_write_cycles" }, \ 625 { 0xA7, 0x01, C0|C1|C2|C3, "baclear_force_iq" }, \ 626 \ 627 { 0xD0, 0x01, C0|C1|C2|C3, "macro_insts.decoded" }, \ 628 { 0xA6, 0x01, C0|C1|C2|C3, "macro_insts.fusions_decoded" }, \ 629 { 0x19, 0x01, C0|C1|C2|C3, "two_uop_insts_decoded" }, \ 630 \ 631 { 0x18, 0x01, C0|C1|C2|C3, "inst_decoded.dec0" }, \ 632 { 0xD1, 0x04, C0|C1|C2|C3, "uops_decoded.esp_folding" }, \ 633 { 0xD1, 0x08, C0|C1|C2|C3, "uops_decoded.esp_sync" }, \ 634 \ 635 { 0xD1, 0x02, C0|C1|C2|C3, "uops_decoded.ms" }, \ 636 { 0x20, 0x01, C0|C1|C2|C3, "lsd_overflow" }, \ 637 { 0x0E, 0x01, C0|C1|C2|C3, "uops_issued.any" }, \ 638 \ 639 { 0x0E, 0x02, C0|C1|C2|C3, "uops_issued.fused" }, \ 640 { 0xA2, 0x20, C0|C1|C2|C3, "resource_stalls.fpcw" }, \ 641 { 0xA2, 0x02, C0|C1|C2|C3, "resource_stalls.load" }, \ 642 \ 643 { 0xA2, 0x40, C0|C1|C2|C3, "resource_stalls.mxcsr" }, \ 644 { 0xA2, 0x04, C0|C1|C2|C3, "resource_stalls.rs_full" }, \ 645 { 0xA2, 0x08, C0|C1|C2|C3, "resource_stalls.store" }, \ 646 \ 647 { 0xA2, 0x01, C0|C1|C2|C3, "resource_stalls.any" }, \ 648 { 0xD2, 0x01, C0|C1|C2|C3, "rat_stalls.flags" }, \ 649 { 0xD2, 0x02, C0|C1|C2|C3, "rat_stalls.registers" }, \ 650 \ 651 { 0xD2, 0x04, C0|C1|C2|C3, "rat_stalls.rob_read_port" }, \ 652 { 0xD2, 0x08, C0|C1|C2|C3, "rat_stalls.scoreboard" }, \ 653 { 0xD2, 0x0F, C0|C1|C2|C3, "rat_stalls.any" }, \ 654 \ 655 { 0xD4, 0x01, C0|C1|C2|C3, "seg_rename_stalls" }, \ 656 { 0xD5, 0x01, C0|C1|C2|C3, "es_reg_renames" }, \ 657 { 0x10, 0x02, C0|C1|C2|C3, "fp_comp_ops_exe.mmx" }, \ 658 \ 659 { 0x10, 0x80, C0|C1|C2|C3, "fp_comp_ops_exe.sse_double_precision" }, \ 660 { 0x10, 0x04, C0|C1|C2|C3, "fp_comp_ops_exe.sse_fp" }, \ 661 { 0x10, 0x10, C0|C1|C2|C3, "fp_comp_ops_exe.sse_fp_packed" }, \ 662 \ 663 { 0x10, 0x20, C0|C1|C2|C3, "fp_comp_ops_exe.sse_fp_scalar" }, \ 664 { 0x10, 0x40, C0|C1|C2|C3, "fp_comp_ops_exe.sse_single_precision" }, \ 665 { 0x10, 0x08, C0|C1|C2|C3, "fp_comp_ops_exe.sse2_integer" }, \ 666 \ 667 { 0x10, 0x01, C0|C1|C2|C3, "fp_comp_ops_exe.x87" }, \ 668 { 0x14, 0x01, C0|C1|C2|C3, "arith.cycles_div_busy" }, \ 669 { 0x14, 0x02, C0|C1|C2|C3, "arith.mul" }, \ 670 \ 671 { 0x12, 0x04, C0|C1|C2|C3, "simd_int_128.pack" }, \ 672 { 0x12, 0x20, C0|C1|C2|C3, "simd_int_128.packed_arith" }, \ 673 { 0x12, 0x10, C0|C1|C2|C3, "simd_int_128.packed_logical" }, \ 674 \ 675 { 0x12, 0x01, C0|C1|C2|C3, "simd_int_128.packed_mpy" }, \ 676 { 0x12, 0x02, C0|C1|C2|C3, "simd_int_128.packed_shift" }, \ 677 { 0x12, 0x40, C0|C1|C2|C3, "simd_int_128.shuffle_move" }, \ 678 \ 679 { 0x12, 0x08, C0|C1|C2|C3, "simd_int_128.unpack" }, \ 680 { 0xFD, 0x04, C0|C1|C2|C3, "simd_int_64.pack" }, \ 681 { 0xFD, 0x20, C0|C1|C2|C3, "simd_int_64.packed_arith" }, \ 682 \ 683 { 0xFD, 0x10, C0|C1|C2|C3, "simd_int_64.packed_logical" }, \ 684 { 0xFD, 0x01, C0|C1|C2|C3, "simd_int_64.packed_mpy" }, \ 685 { 0xFD, 0x02, C0|C1|C2|C3, "simd_int_64.packed_shift" }, \ 686 \ 687 { 0xFD, 0x40, C0|C1|C2|C3, "simd_int_64.shuffle_move" }, \ 688 { 0xFD, 0x08, C0|C1|C2|C3, "simd_int_64.unpack" }, \ 689 { 0xB1, 0x01, C0|C1|C2|C3, "uops_executed.port0" }, \ 690 \ 691 { 0xB1, 0x02, C0|C1|C2|C3, "uops_executed.port1" }, \ 692 { 0x40, 0x04, C0|C1, "l1d_cache_ld.e_state" }, \ 693 { 0x40, 0x01, C0|C1, "l1d_cache_ld.i_state" }, \ 694 \ 695 { 0x40, 0x08, C0|C1, "l1d_cache_ld.m_state" }, \ 696 { 0x40, 0x0F, C0|C1, "l1d_cache_ld.mesi" }, \ 697 { 0x40, 0x02, C0|C1, "l1d_cache_ld.s_state" }, \ 698 \ 699 { 0x41, 0x04, C0|C1, "l1d_cache_st.e_state" }, \ 700 { 0x41, 0x08, C0|C1, "l1d_cache_st.m_state" }, \ 701 { 0x41, 0x0F, C0|C1, "l1d_cache_st.mesi" }, \ 702 \ 703 { 0x41, 0x02, C0|C1, "l1d_cache_st.s_state" }, \ 704 { 0x42, 0x04, C0|C1, "l1d_cache_lock.e_state" }, \ 705 { 0x42, 0x01, C0|C1, "l1d_cache_lock.hit" }, \ 706 \ 707 { 0x42, 0x08, C0|C1, "l1d_cache_lock.m_state" }, \ 708 { 0x42, 0x02, C0|C1, "l1d_cache_lock.s_state" }, \ 709 { 0x43, 0x01, C0|C1, "l1d_all_ref.any" }, \ 710 \ 711 { 0x43, 0x02, C0|C1, "l1d_all_ref.cacheable" }, \ 712 { 0x4B, 0x01, C0|C1, "mmx2_mem_exec.nta" }, \ 713 { 0x4C, 0x01, C0|C1, "load_hit_pre" }, \ 714 \ 715 { 0x4E, 0x02, C0|C1, "l1d_prefetch.miss" }, \ 716 { 0x4E, 0x01, C0|C1, "l1d_prefetch.requests" }, \ 717 { 0x4E, 0x04, C0|C1, "l1d_prefetch.triggers" }, \ 718 \ 719 { 0x51, 0x04, C0|C1, "l1d.m_evict" }, \ 720 { 0x51, 0x02, C0|C1, "l1d.m_repl" }, \ 721 { 0x51, 0x08, C0|C1, "l1d.m_snoop_evict" }, \ 722 \ 723 { 0x51, 0x01, C0|C1, "l1d.repl" }, \ 724 { 0x52, 0x01, C0|C1, "l1d_cache_prefetch_lock_fb_hit" }, \ 725 { 0x53, 0x01, C0|C1, "l1d_cache_lock_fb_hit" }, \ 726 \ 727 { 0x63, 0x02, C0|C1, "cache_lock_cycles.l1d" }, \ 728 { 0x63, 0x01, C0|C1, "cache_lock_cycles.l1d_l2" }, \ 729 { 0x06, 0x04, C0|C1|C2|C3, "store_blocks.at_ret" }, \ 730 \ 731 { 0x06, 0x08, C0|C1|C2|C3, "store_blocks.l1d_block" }, \ 732 { 0x06, 0x01, C0|C1|C2|C3, "store_blocks.not_sta" }, \ 733 { 0x06, 0x02, C0|C1|C2|C3, "store_blocks.sta" }, \ 734 \ 735 { 0x13, 0x07, C0|C1|C2|C3, "load_dispatch.any" }, \ 736 { 0x13, 0x04, C0|C1|C2|C3, "load_dispatch.mob" }, \ 737 { 0x13, 0x01, C0|C1|C2|C3, "load_dispatch.rs" }, \ 738 \ 739 { 0x13, 0x02, C0|C1|C2|C3, "load_dispatch.rs_delayed" }, \ 740 { 0x08, 0x01, C0|C1|C2|C3, "dtlb_load_misses.any" }, \ 741 { 0x08, 0x20, C0|C1|C2|C3, "dtlb_load_misses.pde_miss" }, \ 742 \ 743 { 0x08, 0x02, C0|C1|C2|C3, "dtlb_load_misses.walk_completed" }, \ 744 { 0x49, 0x01, C0|C1|C2|C3, "dtlb_misses.any" }, \ 745 { 0x49, 0x10, C0|C1|C2|C3, "dtlb_misses.stlb_hit" }, \ 746 \ 747 { 0x49, 0x02, C0|C1|C2|C3, "dtlb_misses.walk_completed" }, \ 748 { 0x4F, 0x02, C0|C1|C2|C3, "ept.epde_miss" }, \ 749 { 0x4F, 0x08, C0|C1|C2|C3, "ept.epdpe_miss" }, \ 750 \ 751 { 0x85, 0x01, C0|C1|C2|C3, "itlb_misses.any" }, \ 752 { 0x85, 0x02, C0|C1|C2|C3, "itlb_misses.walk_completed" }, \ 753 { 0x24, 0xAA, C0|C1|C2|C3, "l2_rqsts.miss" }, \ 754 \ 755 { 0x24, 0xFF, C0|C1|C2|C3, "l2_rqsts.references" }, \ 756 { 0x24, 0x10, C0|C1|C2|C3, "l2_rqsts.ifetch_hit" }, \ 757 { 0x24, 0x20, C0|C1|C2|C3, "l2_rqsts.ifetch_miss" }, \ 758 \ 759 { 0x24, 0x30, C0|C1|C2|C3, "l2_rqsts.ifetches" }, \ 760 { 0x24, 0x01, C0|C1|C2|C3, "l2_rqsts.ld_hit" }, \ 761 { 0x24, 0x02, C0|C1|C2|C3, "l2_rqsts.ld_miss" }, \ 762 \ 763 { 0x24, 0x03, C0|C1|C2|C3, "l2_rqsts.loads" }, \ 764 { 0x24, 0x40, C0|C1|C2|C3, "l2_rqsts.prefetch_hit" }, \ 765 { 0x24, 0x80, C0|C1|C2|C3, "l2_rqsts.prefetch_miss" }, \ 766 \ 767 { 0x24, 0xC0, C0|C1|C2|C3, "l2_rqsts.prefetches" }, \ 768 { 0x24, 0x04, C0|C1|C2|C3, "l2_rqsts.rfo_hit" }, \ 769 { 0x24, 0x08, C0|C1|C2|C3, "l2_rqsts.rfo_miss" }, \ 770 \ 771 { 0x24, 0x0C, C0|C1|C2|C3, "l2_rqsts.rfos" }, \ 772 { 0x26, 0xFF, C0|C1|C2|C3, "l2_data_rqsts.any" }, \ 773 { 0x26, 0x04, C0|C1|C2|C3, "l2_data_rqsts.demand.e_state" }, \ 774 \ 775 { 0x26, 0x01, C0|C1|C2|C3, "l2_data_rqsts.demand.i_state" }, \ 776 { 0x26, 0x08, C0|C1|C2|C3, "l2_data_rqsts.demand.m_state" }, \ 777 { 0x26, 0x0F, C0|C1|C2|C3, "l2_data_rqsts.demand.mesi" }, \ 778 \ 779 { 0x26, 0x02, C0|C1|C2|C3, "l2_data_rqsts.demand.s_state" }, \ 780 { 0x26, 0x40, C0|C1|C2|C3, "l2_data_rqsts.prefetch.e_state" }, \ 781 { 0x26, 0x10, C0|C1|C2|C3, "l2_data_rqsts.prefetch.i_state" }, \ 782 \ 783 { 0x26, 0x80, C0|C1|C2|C3, "l2_data_rqsts.prefetch.m_state" }, \ 784 { 0x26, 0xF0, C0|C1|C2|C3, "l2_data_rqsts.prefetch.mesi" }, \ 785 { 0x26, 0x20, C0|C1|C2|C3, "l2_data_rqsts.prefetch.s_state" }, \ 786 \ 787 { 0x27, 0x40, C0|C1|C2|C3, "l2_write.lock.e_state" }, \ 788 { 0x27, 0x10, C0|C1|C2|C3, "l2_write.lock.i_state" }, \ 789 { 0x27, 0x20, C0|C1|C2|C3, "l2_write.lock.s_state" }, \ 790 \ 791 { 0x27, 0x0E, C0|C1|C2|C3, "l2_write.rfo.hit" }, \ 792 { 0x27, 0x01, C0|C1|C2|C3, "l2_write.rfo.i_state" }, \ 793 { 0x27, 0x08, C0|C1|C2|C3, "l2_write.rfo.m_state" }, \ 794 \ 795 { 0x27, 0x0F, C0|C1|C2|C3, "l2_write.rfo.mesi" }, \ 796 { 0x27, 0x02, C0|C1|C2|C3, "l2_write.rfo.s_state" }, \ 797 { 0x28, 0x04, C0|C1|C2|C3, "l1d_wb_l2.e_state" }, \ 798 \ 799 { 0x28, 0x01, C0|C1|C2|C3, "l1d_wb_l2.i_state" }, \ 800 { 0x28, 0x08, C0|C1|C2|C3, "l1d_wb_l2.m_state" }, \ 801 { 0xF0, 0x80, C0|C1|C2|C3, "l2_transactions.any" }, \ 802 \ 803 { 0xF0, 0x20, C0|C1|C2|C3, "l2_transactions.fill" }, \ 804 { 0xF0, 0x04, C0|C1|C2|C3, "l2_transactions.ifetch" }, \ 805 { 0xF0, 0x10, C0|C1|C2|C3, "l2_transactions.l1d_wb" }, \ 806 \ 807 { 0xF0, 0x01, C0|C1|C2|C3, "l2_transactions.load" }, \ 808 { 0xF0, 0x08, C0|C1|C2|C3, "l2_transactions.prefetch" }, \ 809 { 0xF0, 0x02, C0|C1|C2|C3, "l2_transactions.rfo" }, \ 810 \ 811 { 0xF0, 0x40, C0|C1|C2|C3, "l2_transactions.wb" }, \ 812 { 0xF1, 0x07, C0|C1|C2|C3, "l2_lines_in.any" }, \ 813 { 0xF1, 0x04, C0|C1|C2|C3, "l2_lines_in.e_state" }, \ 814 \ 815 { 0xF1, 0x02, C0|C1|C2|C3, "l2_lines_in.s_state" }, \ 816 { 0xF2, 0x0F, C0|C1|C2|C3, "l2_lines_out.any" }, \ 817 { 0xF2, 0x01, C0|C1|C2|C3, "l2_lines_out.demand_clean" }, \ 818 \ 819 { 0xF2, 0x02, C0|C1|C2|C3, "l2_lines_out.demand_dirty" }, \ 820 { 0xF2, 0x04, C0|C1|C2|C3, "l2_lines_out.prefetch_clean" }, \ 821 { 0x6C, 0x01, C0|C1|C2|C3, "io_transactions" }, \ 822 \ 823 { 0xB0, 0x80, C0|C1|C2|C3, "offcore_requests.any" }, \ 824 { 0xB0, 0x10, C0|C1|C2|C3, "offcore_requests.any.rfo" }, \ 825 { 0xB0, 0x40, C0|C1|C2|C3, "offcore_requests.l1d_writeback" }, \ 826 \ 827 { 0xB8, 0x01, C0|C1|C2|C3, "snoop_response.hit" }, \ 828 { 0xB8, 0x02, C0|C1|C2|C3, "snoop_response.hite" }, \ 829 { 0xB8, 0x04, C0|C1|C2|C3, "snoop_response.hitm" }, \ 830 \ 831 { 0xF4, 0x10, C0|C1|C2|C3, "sq_misc.split_lock" }, \ 832 { 0x0B, 0x01, C0|C1|C2|C3, "mem_inst_retired.loads" }, \ 833 { 0x0B, 0x02, C0|C1|C2|C3, "mem_inst_retired.stores" }, \ 834 \ 835 { 0xC0, 0x04, C0|C1|C2|C3, "inst_retired.mmx" }, \ 836 { 0xC0, 0x02, C0|C1|C2|C3, "inst_retired.x87" }, \ 837 { 0xC7, 0x04, C0|C1|C2|C3, "ssex_uops_retired.packed_double" }, \ 838 \ 839 { 0xC7, 0x01, C0|C1|C2|C3, "ssex_uops_retired.packed_single" }, \ 840 { 0xC7, 0x08, C0|C1|C2|C3, "ssex_uops_retired.scalar_double" }, \ 841 { 0xC7, 0x02, C0|C1|C2|C3, "ssex_uops_retired.scalar_single" }, \ 842 \ 843 { 0xC7, 0x10, C0|C1|C2|C3, "ssex_uops_retired.vector_integer" }, \ 844 { 0xC2, 0x01, C0|C1|C2|C3, "uops_retired.any" }, \ 845 { 0xC2, 0x04, C0|C1|C2|C3, "uops_retired.macro_fused" }, \ 846 \ 847 { 0xC8, 0x20, C0|C1|C2|C3, "itlb_miss_retired" }, \ 848 { 0xCB, 0x80, C0|C1|C2|C3, "mem_load_retired.dtlb_miss" }, \ 849 { 0xCB, 0x40, C0|C1|C2|C3, "mem_load_retired.hit_lfb" }, \ 850 \ 851 { 0xCB, 0x01, C0|C1|C2|C3, "mem_load_retired.l1d_hit" }, \ 852 { 0xCB, 0x02, C0|C1|C2|C3, "mem_load_retired.l2_hit" }, \ 853 { 0xCB, 0x10, C0|C1|C2|C3, "mem_load_retired.llc_miss" }, \ 854 \ 855 { 0xCB, 0x04, C0|C1|C2|C3, "mem_load_retired.llc_unshared_hit" }, \ 856 { 0xCB, 0x08, C0|C1|C2|C3, "mem_load_retired.other_core_l2_hit_hitm" }, \ 857 { 0x0F, 0x02, C0|C1|C2|C3, "mem_uncore_retired.other_core_l2_hitm" }, \ 858 \ 859 { 0x0F, 0x08, C0|C1|C2|C3, "mem_uncore_retired.remote_cache_local_home_hit" },\ 860 { 0x0F, 0x10, C0|C1|C2|C3, "mem_uncore_retired.remote_dram" }, \ 861 { 0x0F, 0x20, C0|C1|C2|C3, "mem_uncore_retired.local_dram" }, \ 862 \ 863 { 0x0C, 0x01, C0|C1|C2|C3, "mem_store_retired.dtlb_miss" }, \ 864 { 0xC4, 0x01, C0|C1|C2|C3, "br_inst_retired.conditional" }, \ 865 { 0xC4, 0x02, C0|C1|C2|C3, "br_inst_retired.near_call" }, \ 866 \ 867 { 0xC5, 0x02, C0|C1|C2|C3, "br_misp_retired.near_call" }, \ 868 { 0xDB, 0x01, C0|C1|C2|C3, "uop_unfusion" }, \ 869 { 0xF7, 0x01, C0|C1|C2|C3, "fp_assist.all" }, \ 870 \ 871 { 0xF7, 0x04, C0|C1|C2|C3, "fp_assist.input" }, \ 872 { 0xF7, 0x02, C0|C1|C2|C3, "fp_assist.output" }, \ 873 { 0xCC, 0x03, C0|C1|C2|C3, "fp_mmx_trans.any" }, \ 874 \ 875 { 0xCC, 0x01, C0|C1|C2|C3, "fp_mmx_trans.to_fp" }, \ 876 { 0xCC, 0x02, C0|C1|C2|C3, "fp_mmx_trans.to_mmx" }, \ 877 { 0xC3, 0x04, C0|C1|C2|C3, "machine_clears.smc" } 878 879 #define GENERICEVENTS_FAM6_MOD28 \ 880 { 0xc4, 0x00, C0|C1, "PAPI_br_ins" }, /* br_inst_retired.any */ \ 881 { 0xc5, 0x00, C0|C1, "PAPI_br_msp" }, /* br_inst_retired.mispred */ \ 882 { 0xc4, 0x03, C0|C1, "PAPI_br_ntk" }, \ 883 /* br_inst_retired.pred_not_taken|mispred_not_taken */ \ 884 { 0xc4, 0x05, C0|C1, "PAPI_br_prc" }, \ 885 /* br_inst_retired.pred_not_taken|pred_taken */ \ 886 { 0xc8, 0x00, C0|C1, "PAPI_hw_int" }, /* hw_int_rcv */ \ 887 { 0xaa, 0x03, C0|C1, "PAPI_tot_iis" }, /* macro_insts.all_decoded */ \ 888 { 0x40, 0x23, C0|C1, "PAPI_l1_dca" }, /* l1d_cache.l1|st */ \ 889 { 0x2a, 0x41, C0|C1, "PAPI_l2_stm" }, /* l2_st.self.i_state */ \ 890 { 0x2e, 0x4f, C0|C1, "PAPI_l2_tca" }, /* longest_lat_cache.reference */ \ 891 { 0x2e, 0x4e, C0|C1, "PAPI_l2_tch" }, /* l2_rqsts.mes */ \ 892 { 0x2e, 0x41, C0|C1, "PAPI_l2_tcm" }, /* longest_lat_cache.miss */ \ 893 { 0x2a, 0x4f, C0|C1, "PAPI_l2_tcw" }, /* l2_st.self.mesi */ \ 894 { 0x08, 0x07, C0|C1, "PAPI_tlb_dm" }, /* data_tlb_misses.dtlb.miss */ \ 895 { 0x82, 0x02, C0|C1, "PAPI_tlb_im" } /* itlb.misses */ 896 897 898 #define EVENTS_FAM6_MOD28 \ 899 { 0x2, 0x81, C0|C1, "store_forwards.good" }, \ 900 { 0x6, 0x0, C0|C1, "segment_reg_loads.any" }, \ 901 { 0x7, 0x1, C0|C1, "prefetch.prefetcht0" }, \ 902 { 0x7, 0x6, C0|C1, "prefetch.sw_l2" }, \ 903 { 0x7, 0x8, C0|C1, "prefetch.prefetchnta" }, \ 904 { 0x8, 0x7, C0|C1, "data_tlb_misses.dtlb_miss" }, \ 905 { 0x8, 0x5, C0|C1, "data_tlb_misses.dtlb_miss_ld" }, \ 906 { 0x8, 0x9, C0|C1, "data_tlb_misses.l0_dtlb_miss_ld" }, \ 907 { 0x8, 0x6, C0|C1, "data_tlb_misses.dtlb_miss_st" }, \ 908 { 0xC, 0x3, C0|C1, "page_walks.cycles" }, \ 909 { 0x10, 0x1, C0|C1, "x87_comp_ops_exe.any.s" }, \ 910 { 0x10, 0x81, C0|C1, "x87_comp_ops_exe.any.ar" }, \ 911 { 0x11, 0x1, C0|C1, "fp_assist" }, \ 912 { 0x11, 0x81, C0|C1, "fp_assist.ar" }, \ 913 { 0x12, 0x1, C0|C1, "mul.s" }, \ 914 { 0x12, 0x81, C0|C1, "mul.ar" }, \ 915 { 0x13, 0x1, C0|C1, "div.s" }, \ 916 { 0x13, 0x81, C0|C1, "div.ar" }, \ 917 { 0x14, 0x1, C0|C1, "cycles_div_busy" }, \ 918 { 0x21, 0x0, C0|C1, "l2_ads" }, \ 919 { 0x22, 0x0, C0|C1, "l2_dbus_busy" }, \ 920 { 0x24, 0x0, C0|C1, "l2_lines_in" }, \ 921 { 0x25, 0x0, C0|C1, "l2_m_lines_in" }, \ 922 { 0x26, 0x0, C0|C1, "l2_lines_out" }, \ 923 { 0x27, 0x0, C0|C1, "l2_m_lines_out" }, \ 924 { 0x28, 0x0, C0|C1, "l2_ifetch" }, \ 925 { 0x29, 0x0, C0|C1, "l2_ld" }, \ 926 { 0x2A, 0x0, C0|C1, "l2_st" }, \ 927 { 0x2B, 0x0, C0|C1, "l2_lock" }, \ 928 { 0x2E, 0x0, C0|C1, "l2_rqsts" }, \ 929 { 0x2E, 0x41, C0|C1, "l2_rqsts.self.demand.i_state" }, \ 930 { 0x2E, 0x4F, C0|C1, "l2_rqsts.self.demand.mesi" }, \ 931 { 0x30, 0x0, C0|C1, "l2_reject_bus_q" }, \ 932 { 0x32, 0x0, C0|C1, "l2_no_req" }, \ 933 { 0x3A, 0x0, C0|C1, "eist_trans" }, \ 934 { 0x3B, 0xC0, C0|C1, "thermal_trip" }, \ 935 { 0x3C, 0x0, C0|C1, "cpu_clk_unhalted.core_p" }, \ 936 { 0x3C, 0x1, C0|C1, "cpu_clk_unhalted.bus" }, \ 937 { 0x3C, 0x2, C0|C1, "cpu_clk_unhalted.no_other" }, \ 938 { 0x40, 0x21, C0|C1, "l1d_cache.ld" }, \ 939 { 0x40, 0x22, C0|C1, "l1d_cache.st" }, \ 940 { 0x60, 0x0, C0|C1, "bus_request_outstanding" }, \ 941 { 0x61, 0x0, C0|C1, "bus_bnr_drv" }, \ 942 { 0x62, 0x0, C0|C1, "bus_drdy_clocks" }, \ 943 { 0x63, 0x0, C0|C1, "bus_lock_clocks" }, \ 944 { 0x64, 0x0, C0|C1, "bus_data_rcv" }, \ 945 { 0x65, 0x0, C0|C1, "bus_trans_brd" }, \ 946 { 0x66, 0x0, C0|C1, "bus_trans_rfo" }, \ 947 { 0x67, 0x0, C0|C1, "bus_trans_wb" }, \ 948 { 0x68, 0x0, C0|C1, "bus_trans_ifetch" }, \ 949 { 0x69, 0x0, C0|C1, "bus_trans_inval" }, \ 950 { 0x6A, 0x0, C0|C1, "bus_trans_pwr" }, \ 951 { 0x6B, 0x0, C0|C1, "bus_trans_p" }, \ 952 { 0x6C, 0x0, C0|C1, "bus_trans_io" }, \ 953 { 0x6D, 0x0, C0|C1, "bus_trans_def" }, \ 954 { 0x6E, 0x0, C0|C1, "bus_trans_burst" }, \ 955 { 0x6F, 0x0, C0|C1, "bus_trans_mem" }, \ 956 { 0x70, 0x0, C0|C1, "bus_trans_any" }, \ 957 { 0x77, 0x0, C0|C1, "ext_snoop" }, \ 958 { 0x7A, 0x0, C0|C1, "bus_hit_drv" }, \ 959 { 0x7B, 0x0, C0|C1, "bus_hitm_drv" }, \ 960 { 0x7D, 0x0, C0|C1, "busq_empty" }, \ 961 { 0x7E, 0x0, C0|C1, "snoop_stall_drv" }, \ 962 { 0x7F, 0x0, C0|C1, "bus_io_wait" }, \ 963 { 0x80, 0x3, C0|C1, "icache.accesses" }, \ 964 { 0x80, 0x2, C0|C1, "icache.misses" }, \ 965 { 0x82, 0x4, C0|C1, "itlb.flush" }, \ 966 { 0x82, 0x2, C0|C1, "itlb.misses" }, \ 967 { 0xAA, 0x2, C0|C1, "macro_insts.cisc_decoded" }, \ 968 { 0xAA, 0x3, C0|C1, "macro_insts.all_decoded" }, \ 969 { 0xB0, 0x0, C0|C1, "simd_uops_exec.s" }, \ 970 { 0xB0, 0x80, C0|C1, "simd_uops_exec.ar" }, \ 971 { 0xB1, 0x0, C0|C1, "simd_sat_uop_exec.s" }, \ 972 { 0xB1, 0x80, C0|C1, "simd_sat_uop_exec.ar" }, \ 973 { 0xB3, 0x1, C0|C1, "simd_uop_type_exec.mul.s" }, \ 974 { 0xB3, 0x81, C0|C1, "simd_uop_type_exec.mul.ar" }, \ 975 { 0xB3, 0x02, C0|C1, "simd_uop_type_exec.shift.s" }, \ 976 { 0xB3, 0x82, C0|C1, "simd_uop_type_exec.shift.ar" }, \ 977 { 0xB3, 0x04, C0|C1, "simd_uop_type_exec.pack.s" }, \ 978 { 0xB3, 0x84, C0|C1, "simd_uop_type_exec.pack.ar" }, \ 979 { 0xB3, 0x08, C0|C1, "simd_uop_type_exec.unpack.s" }, \ 980 { 0xB3, 0x88, C0|C1, "simd_uop_type_exec.unpack.ar" }, \ 981 { 0xB3, 0x10, C0|C1, "simd_uop_type_exec.logical.s" }, \ 982 { 0xB3, 0x90, C0|C1, "simd_uop_type_exec.logical.ar" }, \ 983 { 0xB3, 0x20, C0|C1, "simd_uop_type_exec.arithmetic.s" }, \ 984 { 0xB3, 0xA0, C0|C1, "simd_uop_type_exec.arithmetic.ar" }, \ 985 { 0xC2, 0x10, C0|C1, "uops_retired.any" }, \ 986 { 0xC3, 0x1, C0|C1, "machine_clears.smc" }, \ 987 { 0xC4, 0x0, C0|C1, "br_inst_retired.any" }, \ 988 { 0xC4, 0x1, C0|C1, "br_inst_retired.pred_not_taken" }, \ 989 { 0xC4, 0x2, C0|C1, "br_inst_retired.mispred_not_taken" }, \ 990 { 0xC4, 0x4, C0|C1, "br_inst_retired.pred_taken" }, \ 991 { 0xC4, 0x8, C0|C1, "br_inst_retired.mispred_taken" }, \ 992 { 0xC4, 0xA, C0|C1, "br_inst_retired.mispred" }, \ 993 { 0xC4, 0xC, C0|C1, "br_inst_retired.taken" }, \ 994 { 0xC4, 0xF, C0|C1, "br_inst_retired.any1" }, \ 995 { 0xC6, 0x1, C0|C1, "cycles_int_masked.cycles_int_masked" }, \ 996 { 0xC6, 0x2, C0|C1, \ 997 "cycles_int_masked.cycles_int_pending_and_masked" }, \ 998 { 0xC7, 0x1, C0|C1, "simd_inst_retired.packed_single" }, \ 999 { 0xC7, 0x2, C0|C1, "simd_inst_retired.scalar_single" }, \ 1000 { 0xC7, 0x4, C0|C1, "simd_inst_retired.packed_double" }, \ 1001 { 0xC7, 0x8, C0|C1, "simd_inst_retired.scalar_double" }, \ 1002 { 0xC7, 0x10, C0|C1, "simd_inst_retired.vector" }, \ 1003 { 0xC7, 0x1F, C0|C1, "simd_inst_retired.any" }, \ 1004 { 0xC8, 0x00, C0|C1, "hw_int_rcv" }, \ 1005 { 0xCA, 0x1, C0|C1, "simd_comp_inst_retired.packed_single" }, \ 1006 { 0xCA, 0x2, C0|C1, "simd_comp_inst_retired.scalar_single" }, \ 1007 { 0xCA, 0x4, C0|C1, "simd_comp_inst_retired.packed_double" }, \ 1008 { 0xCA, 0x8, C0|C1, "simd_comp_inst_retired.scalar_double" }, \ 1009 { 0xCB, 0x1, C0|C1, "mem_load_retired.l2_hit" }, \ 1010 { 0xCB, 0x2, C0|C1, "mem_load_retired.l2_miss" }, \ 1011 { 0xCB, 0x4, C0|C1, "mem_load_retired.dtlb_miss" }, \ 1012 { 0xCD, 0x0, C0|C1, "simd_assist" }, \ 1013 { 0xCE, 0x0, C0|C1, "simd_instr_retired" }, \ 1014 { 0xCF, 0x0, C0|C1, "simd_sat_instr_retired" }, \ 1015 { 0xE0, 0x1, C0|C1, "br_inst_decoded" }, \ 1016 { 0xE4, 0x1, C0|C1, "bogus_br" }, \ 1017 { 0xE6, 0x1, C0|C1, "baclears.any" } 1018 1019 static const struct events_table_t *events_table = NULL; 1020 1021 const struct events_table_t events_fam6_mod26[] = { 1022 GENERICEVENTS_FAM6_MOD26, 1023 EVENTS_FAM6_MOD26, 1024 { NT_END, 0, 0, "" } 1025 }; 1026 1027 const struct events_table_t events_fam6_mod28[] = { 1028 GENERICEVENTS_FAM6_MOD28, 1029 EVENTS_FAM6_MOD28, 1030 { NT_END, 0, 0, "" } 1031 }; 1032 1033 /* 1034 * Initialize string containing list of supported general-purpose counter 1035 * events for processors of Penryn and Merom Family 1036 */ 1037 static void 1038 pcbe_init_core_uarch() 1039 { 1040 const struct nametable_core_uarch *n; 1041 const struct generic_events *k; 1042 const struct nametable_core_uarch *picspecific_events; 1043 const struct generic_events *picspecific_genericevents; 1044 size_t common_size; 1045 size_t size; 1046 uint64_t i; 1047 1048 gpc_names = kmem_alloc(num_gpc * sizeof (char *), KM_SLEEP); 1049 1050 /* Calculate space needed to save all the common event names */ 1051 common_size = 0; 1052 for (n = cmn_gpc_events_core_uarch; n->event_num != NT_END; n++) { 1053 common_size += strlen(n->name) + 1; 1054 } 1055 1056 for (k = cmn_generic_events; k->event_num != NT_END; k++) { 1057 common_size += strlen(k->name) + 1; 1058 } 1059 1060 for (i = 0; i < num_gpc; i++) { 1061 size = 0; 1062 picspecific_genericevents = NULL; 1063 1064 switch (i) { 1065 case 0: 1066 picspecific_events = pic0_events; 1067 picspecific_genericevents = generic_events_pic0; 1068 break; 1069 case 1: 1070 picspecific_events = pic1_events; 1071 break; 1072 default: 1073 picspecific_events = NULL; 1074 break; 1075 } 1076 if (picspecific_events != NULL) { 1077 for (n = picspecific_events; 1078 n->event_num != NT_END; 1079 n++) { 1080 size += strlen(n->name) + 1; 1081 } 1082 } 1083 if (picspecific_genericevents != NULL) { 1084 for (k = picspecific_genericevents; 1085 k->event_num != NT_END; k++) { 1086 size += strlen(k->name) + 1; 1087 } 1088 } 1089 1090 gpc_names[i] = 1091 kmem_alloc(size + common_size + 1, KM_SLEEP); 1092 1093 gpc_names[i][0] = '\0'; 1094 if (picspecific_events != NULL) { 1095 for (n = picspecific_events; 1096 n->event_num != NT_END; n++) { 1097 (void) strcat(gpc_names[i], n->name); 1098 (void) strcat(gpc_names[i], ","); 1099 } 1100 } 1101 if (picspecific_genericevents != NULL) { 1102 for (k = picspecific_genericevents; 1103 k->event_num != NT_END; k++) { 1104 (void) strcat(gpc_names[i], k->name); 1105 (void) strcat(gpc_names[i], ","); 1106 } 1107 } 1108 for (n = cmn_gpc_events_core_uarch; n->event_num != NT_END; 1109 n++) { 1110 (void) strcat(gpc_names[i], n->name); 1111 (void) strcat(gpc_names[i], ","); 1112 } 1113 for (k = cmn_generic_events; k->event_num != NT_END; k++) { 1114 (void) strcat(gpc_names[i], k->name); 1115 (void) strcat(gpc_names[i], ","); 1116 } 1117 1118 /* 1119 * Remove trailing comma. 1120 */ 1121 gpc_names[i][common_size + size - 1] = '\0'; 1122 } 1123 } 1124 1125 static int 1126 core_pcbe_init(void) 1127 { 1128 struct cpuid_regs cp; 1129 size_t size; 1130 uint64_t i; 1131 uint64_t j; 1132 uint64_t arch_events_vector_length; 1133 size_t arch_events_string_length; 1134 1135 if (cpuid_getvendor(CPU) != X86_VENDOR_Intel) 1136 return (-1); 1137 1138 /* Obtain Basic CPUID information */ 1139 cp.cp_eax = 0x0; 1140 (void) __cpuid_insn(&cp); 1141 1142 /* No Architectural Performance Monitoring Leaf returned by CPUID */ 1143 if (cp.cp_eax < 0xa) { 1144 return (-1); 1145 } 1146 1147 /* Obtain the Architectural Performance Monitoring Leaf */ 1148 cp.cp_eax = 0xa; 1149 (void) __cpuid_insn(&cp); 1150 1151 versionid = cp.cp_eax & 0xFF; 1152 1153 /* 1154 * Fixed-Function Counters (FFC) 1155 * 1156 * All Family 6 Model 15 and Model 23 processors have fixed-function 1157 * counters. These counters were made Architectural with 1158 * Family 6 Model 15 Stepping 9. 1159 */ 1160 switch (versionid) { 1161 1162 case 0: 1163 return (-1); 1164 1165 case 2: 1166 num_ffc = cp.cp_edx & 0x1F; 1167 width_ffc = (cp.cp_edx >> 5) & 0xFF; 1168 1169 /* 1170 * Some processors have an errata (AW34) where 1171 * versionid is reported as 2 when actually 1. 1172 * In this case, fixed-function counters are 1173 * model-specific as in Version 1. 1174 */ 1175 if (num_ffc != 0) { 1176 break; 1177 } 1178 /* FALLTHROUGH */ 1179 case 1: 1180 num_ffc = 3; 1181 width_ffc = 40; 1182 versionid = 1; 1183 break; 1184 1185 default: 1186 num_ffc = cp.cp_edx & 0x1F; 1187 width_ffc = (cp.cp_edx >> 5) & 0xFF; 1188 break; 1189 } 1190 1191 1192 if (num_ffc >= 64) 1193 return (-1); 1194 1195 /* Set HTT-specific names of architectural & FFC events */ 1196 if (x86_feature & X86_HTT) { 1197 ffc_names = ffc_names_htt; 1198 arch_events_table = arch_events_table_htt; 1199 known_arch_events = 1200 sizeof (arch_events_table_htt) / 1201 sizeof (struct events_table_t); 1202 known_ffc_num = 1203 sizeof (ffc_names_htt) / sizeof (char *); 1204 } else { 1205 ffc_names = ffc_names_non_htt; 1206 arch_events_table = arch_events_table_non_htt; 1207 known_arch_events = 1208 sizeof (arch_events_table_non_htt) / 1209 sizeof (struct events_table_t); 1210 known_ffc_num = 1211 sizeof (ffc_names_non_htt) / sizeof (char *); 1212 } 1213 1214 if (num_ffc >= known_ffc_num) { 1215 /* 1216 * The system seems to have more fixed-function counters than 1217 * what this PCBE is able to handle correctly. Default to the 1218 * maximum number of fixed-function counters that this driver 1219 * is aware of. 1220 */ 1221 num_ffc = known_ffc_num - 1; 1222 } 1223 1224 mask_ffc = BITMASK_XBITS(width_ffc); 1225 control_ffc = BITMASK_XBITS(num_ffc); 1226 1227 /* 1228 * General Purpose Counters (GPC) 1229 */ 1230 num_gpc = (cp.cp_eax >> 8) & 0xFF; 1231 width_gpc = (cp.cp_eax >> 16) & 0xFF; 1232 1233 if (num_gpc >= 64) 1234 return (-1); 1235 1236 mask_gpc = BITMASK_XBITS(width_gpc); 1237 1238 control_gpc = BITMASK_XBITS(num_gpc); 1239 1240 control_mask = (control_ffc << 32) | control_gpc; 1241 1242 total_pmc = num_gpc + num_ffc; 1243 if (total_pmc > 64) { 1244 /* Too wide for the overflow bitmap */ 1245 return (-1); 1246 } 1247 1248 /* FFC names */ 1249 ffc_allnames = kmem_alloc(num_ffc * sizeof (char *), KM_SLEEP); 1250 for (i = 0; i < num_ffc; i++) { 1251 ffc_allnames[i] = kmem_alloc( 1252 strlen(ffc_names[i]) + strlen(ffc_genericnames[i]) + 2, 1253 KM_SLEEP); 1254 1255 ffc_allnames[i][0] = '\0'; 1256 (void) strcat(ffc_allnames[i], ffc_names[i]); 1257 1258 /* Check if this ffc has a generic name */ 1259 if (strcmp(ffc_genericnames[i], "") != 0) { 1260 (void) strcat(ffc_allnames[i], ","); 1261 (void) strcat(ffc_allnames[i], ffc_genericnames[i]); 1262 } 1263 } 1264 1265 /* GPC events for Family 6 Models 15, 23 and 29 only */ 1266 if ((cpuid_getfamily(CPU) == 6) && 1267 ((cpuid_getmodel(CPU) == 15) || (cpuid_getmodel(CPU) == 23) || 1268 (cpuid_getmodel(CPU) == 29))) { 1269 (void) snprintf(core_impl_name, IMPL_NAME_LEN, 1270 "Core Microarchitecture"); 1271 pcbe_init_core_uarch(); 1272 return (0); 1273 } 1274 1275 (void) snprintf(core_impl_name, IMPL_NAME_LEN, 1276 "Intel Arch PerfMon v%d on Family %d Model %d", 1277 versionid, cpuid_getfamily(CPU), cpuid_getmodel(CPU)); 1278 1279 /* 1280 * Architectural events 1281 */ 1282 arch_events_vector_length = (cp.cp_eax >> 24) & 0xFF; 1283 1284 ASSERT(known_arch_events == arch_events_vector_length); 1285 1286 /* 1287 * To handle the case where a new performance monitoring setup is run 1288 * on a non-debug kernel 1289 */ 1290 if (known_arch_events > arch_events_vector_length) { 1291 known_arch_events = arch_events_vector_length; 1292 } else { 1293 arch_events_vector_length = known_arch_events; 1294 } 1295 1296 arch_events_vector = cp.cp_ebx & 1297 BITMASK_XBITS(arch_events_vector_length); 1298 1299 /* 1300 * Process architectural and non-architectural events using GPC 1301 */ 1302 if (num_gpc > 0) { 1303 1304 gpc_names = kmem_alloc(num_gpc * sizeof (char *), KM_SLEEP); 1305 1306 /* Calculate space required for the architectural gpc events */ 1307 arch_events_string_length = 0; 1308 for (i = 0; i < known_arch_events; i++) { 1309 if (((1U << i) & arch_events_vector) == 0) { 1310 arch_events_string_length += 1311 strlen(arch_events_table[i].name) + 1; 1312 if (strcmp(arch_genevents_table[i], "") != 0) { 1313 arch_events_string_length += 1314 strlen(arch_genevents_table[i]) + 1; 1315 } 1316 } 1317 } 1318 1319 /* Non-architectural events list */ 1320 if (cpuid_getmodel(CPU) == 26) { 1321 events_table = events_fam6_mod26; 1322 } else if (cpuid_getmodel(CPU) == 28) { 1323 events_table = events_fam6_mod28; 1324 } 1325 1326 for (i = 0; i < num_gpc; i++) { 1327 1328 /* 1329 * Determine length of all supported event names 1330 * (architectural + non-architectural) 1331 */ 1332 size = arch_events_string_length; 1333 for (j = 0; events_table != NULL && 1334 events_table[j].eventselect != NT_END; 1335 j++) { 1336 if (C(i) & events_table[j].supported_counters) { 1337 size += strlen(events_table[j].name) + 1338 1; 1339 } 1340 } 1341 1342 /* Allocate memory for this pics list */ 1343 gpc_names[i] = kmem_alloc(size + 1, KM_SLEEP); 1344 gpc_names[i][0] = '\0'; 1345 if (size == 0) { 1346 continue; 1347 } 1348 1349 /* 1350 * Create the list of all supported events 1351 * (architectural + non-architectural) 1352 */ 1353 for (j = 0; j < known_arch_events; j++) { 1354 if (((1U << j) & arch_events_vector) == 0) { 1355 (void) strcat(gpc_names[i], 1356 arch_events_table[j].name); 1357 (void) strcat(gpc_names[i], ","); 1358 if (strcmp( 1359 arch_genevents_table[j], "") 1360 != 0) { 1361 (void) strcat(gpc_names[i], 1362 arch_genevents_table[j]); 1363 (void) strcat(gpc_names[i], 1364 ","); 1365 } 1366 } 1367 } 1368 1369 for (j = 0; events_table != NULL && 1370 events_table[j].eventselect != NT_END; 1371 j++) { 1372 if (C(i) & events_table[j].supported_counters) { 1373 (void) strcat(gpc_names[i], 1374 events_table[j].name); 1375 (void) strcat(gpc_names[i], ","); 1376 } 1377 } 1378 1379 /* Remove trailing comma */ 1380 gpc_names[i][size - 1] = '\0'; 1381 } 1382 } 1383 1384 return (0); 1385 } 1386 1387 static uint_t core_pcbe_ncounters() 1388 { 1389 return (total_pmc); 1390 } 1391 1392 static const char *core_pcbe_impl_name(void) 1393 { 1394 return (core_impl_name); 1395 } 1396 1397 static const char *core_pcbe_cpuref(void) 1398 { 1399 return (core_cpuref); 1400 } 1401 1402 static char *core_pcbe_list_events(uint_t picnum) 1403 { 1404 ASSERT(picnum < cpc_ncounters); 1405 1406 if (picnum < num_gpc) { 1407 return (gpc_names[picnum]); 1408 } else { 1409 return (ffc_allnames[picnum - num_gpc]); 1410 } 1411 } 1412 1413 static char *core_pcbe_list_attrs(void) 1414 { 1415 if (versionid >= 3) { 1416 return ("edge,inv,umask,cmask,anythr"); 1417 } else { 1418 return ("edge,pc,inv,umask,cmask"); 1419 } 1420 } 1421 1422 static const struct nametable_core_uarch * 1423 find_gpcevent_core_uarch(char *name, 1424 const struct nametable_core_uarch *nametable) 1425 { 1426 const struct nametable_core_uarch *n; 1427 int compare_result = -1; 1428 1429 for (n = nametable; n->event_num != NT_END; n++) { 1430 compare_result = strcmp(name, n->name); 1431 if (compare_result <= 0) { 1432 break; 1433 } 1434 } 1435 1436 if (compare_result == 0) { 1437 return (n); 1438 } 1439 1440 return (NULL); 1441 } 1442 1443 static const struct generic_events * 1444 find_generic_events(char *name, const struct generic_events *table) 1445 { 1446 const struct generic_events *n; 1447 1448 for (n = table; n->event_num != NT_END; n++) { 1449 if (strcmp(name, n->name) == 0) { 1450 return (n); 1451 }; 1452 } 1453 1454 return (NULL); 1455 } 1456 1457 static const struct events_table_t * 1458 find_gpcevent(char *name) 1459 { 1460 int i; 1461 1462 /* Search architectural events */ 1463 for (i = 0; i < known_arch_events; i++) { 1464 if (strcmp(name, arch_events_table[i].name) == 0 || 1465 strcmp(name, arch_genevents_table[i]) == 0) { 1466 if (((1U << i) & arch_events_vector) == 0) { 1467 return (&arch_events_table[i]); 1468 } 1469 } 1470 } 1471 1472 /* Search non-architectural events */ 1473 if (events_table != NULL) { 1474 for (i = 0; events_table[i].eventselect != NT_END; i++) { 1475 if (strcmp(name, events_table[i].name) == 0) { 1476 return (&events_table[i]); 1477 } 1478 } 1479 } 1480 1481 return (NULL); 1482 } 1483 1484 static uint64_t 1485 core_pcbe_event_coverage(char *event) 1486 { 1487 uint64_t bitmap; 1488 uint64_t bitmask; 1489 const struct events_table_t *n; 1490 int i; 1491 1492 bitmap = 0; 1493 1494 /* Is it an event that a GPC can track? */ 1495 if (versionid >= 3) { 1496 n = find_gpcevent(event); 1497 if (n != NULL) { 1498 bitmap |= (n->supported_counters & 1499 BITMASK_XBITS(num_gpc)); 1500 } 1501 } else { 1502 if (find_generic_events(event, cmn_generic_events) != NULL) { 1503 bitmap |= BITMASK_XBITS(num_gpc); 1504 } if (find_generic_events(event, generic_events_pic0) != NULL) { 1505 bitmap |= 1ULL; 1506 } else if (find_gpcevent_core_uarch(event, 1507 cmn_gpc_events_core_uarch) != NULL) { 1508 bitmap |= BITMASK_XBITS(num_gpc); 1509 } else if (find_gpcevent_core_uarch(event, pic0_events) != 1510 NULL) { 1511 bitmap |= 1ULL; 1512 } else if (find_gpcevent_core_uarch(event, pic1_events) != 1513 NULL) { 1514 bitmap |= 1ULL << 1; 1515 } 1516 } 1517 1518 /* Check if the event can be counted in the fixed-function counters */ 1519 if (num_ffc > 0) { 1520 bitmask = 1ULL << num_gpc; 1521 for (i = 0; i < num_ffc; i++) { 1522 if (strcmp(event, ffc_names[i]) == 0) { 1523 bitmap |= bitmask; 1524 } else if (strcmp(event, ffc_genericnames[i]) == 0) { 1525 bitmap |= bitmask; 1526 } 1527 bitmask = bitmask << 1; 1528 } 1529 } 1530 1531 return (bitmap); 1532 } 1533 1534 static uint64_t 1535 core_pcbe_overflow_bitmap(void) 1536 { 1537 uint64_t interrupt_status; 1538 uint64_t intrbits_ffc; 1539 uint64_t intrbits_gpc; 1540 extern int kcpc_hw_overflow_intr_installed; 1541 uint64_t overflow_bitmap; 1542 1543 RDMSR(PERF_GLOBAL_STATUS, interrupt_status); 1544 WRMSR(PERF_GLOBAL_OVF_CTRL, interrupt_status); 1545 1546 interrupt_status = interrupt_status & control_mask; 1547 intrbits_ffc = (interrupt_status >> 32) & control_ffc; 1548 intrbits_gpc = interrupt_status & control_gpc; 1549 overflow_bitmap = (intrbits_ffc << num_gpc) | intrbits_gpc; 1550 1551 ASSERT(kcpc_hw_overflow_intr_installed); 1552 (*kcpc_hw_enable_cpc_intr)(); 1553 1554 return (overflow_bitmap); 1555 } 1556 1557 static int 1558 check_cpc_securitypolicy(core_pcbe_config_t *conf, 1559 const struct nametable_core_uarch *n) 1560 { 1561 if (conf->core_ctl & n->restricted_bits) { 1562 if (secpolicy_cpc_cpu(crgetcred()) != 0) { 1563 return (CPC_ATTR_REQUIRES_PRIVILEGE); 1564 } 1565 } 1566 return (0); 1567 } 1568 1569 static int 1570 configure_gpc(uint_t picnum, char *event, uint64_t preset, uint32_t flags, 1571 uint_t nattrs, kcpc_attr_t *attrs, void **data) 1572 { 1573 core_pcbe_config_t conf; 1574 const struct nametable_core_uarch *n; 1575 const struct generic_events *k = NULL; 1576 const struct nametable_core_uarch *m; 1577 const struct nametable_core_uarch *picspecific_events; 1578 struct nametable_core_uarch nt_raw = { "", 0x0, 0x0 }; 1579 uint_t i; 1580 long event_num; 1581 const struct events_table_t *eventcode; 1582 1583 if (((preset & BITS_EXTENDED_FROM_31) != 0) && 1584 ((preset & BITS_EXTENDED_FROM_31) != 1585 BITS_EXTENDED_FROM_31)) { 1586 1587 /* 1588 * Bits beyond bit-31 in the general-purpose counters can only 1589 * be written to by extension of bit 31. We cannot preset 1590 * these bits to any value other than all 1s or all 0s. 1591 */ 1592 return (CPC_ATTRIBUTE_OUT_OF_RANGE); 1593 } 1594 1595 if (versionid >= 3) { 1596 eventcode = find_gpcevent(event); 1597 if (eventcode != NULL) { 1598 if ((C(picnum) & eventcode->supported_counters) == 0) { 1599 return (CPC_PIC_NOT_CAPABLE); 1600 } 1601 if (nattrs > 0 && 1602 (strncmp("PAPI_", event, 5) == 0)) { 1603 return (CPC_ATTRIBUTE_OUT_OF_RANGE); 1604 } 1605 conf.core_ctl = eventcode->eventselect; 1606 conf.core_ctl |= eventcode->unitmask << 1607 CORE_UMASK_SHIFT; 1608 } else { 1609 /* Event specified as raw event code */ 1610 if (ddi_strtol(event, NULL, 0, &event_num) != 0) { 1611 return (CPC_INVALID_EVENT); 1612 } 1613 conf.core_ctl = event_num & 0xFF; 1614 } 1615 } else { 1616 if ((k = find_generic_events(event, cmn_generic_events)) != 1617 NULL || 1618 (picnum == 0 && 1619 (k = find_generic_events(event, generic_events_pic0)) != 1620 NULL)) { 1621 if (nattrs > 0) { 1622 return (CPC_ATTRIBUTE_OUT_OF_RANGE); 1623 } 1624 conf.core_ctl = k->event_num; 1625 conf.core_ctl |= k->umask << CORE_UMASK_SHIFT; 1626 } else { 1627 /* Not a generic event */ 1628 1629 n = find_gpcevent_core_uarch(event, 1630 cmn_gpc_events_core_uarch); 1631 if (n == NULL) { 1632 switch (picnum) { 1633 case 0: 1634 picspecific_events = 1635 pic0_events; 1636 break; 1637 case 1: 1638 picspecific_events = 1639 pic1_events; 1640 break; 1641 default: 1642 picspecific_events = NULL; 1643 break; 1644 } 1645 if (picspecific_events != NULL) { 1646 n = find_gpcevent_core_uarch(event, 1647 picspecific_events); 1648 } 1649 } 1650 if (n == NULL) { 1651 1652 /* 1653 * Check if this is a case where the event was 1654 * specified directly by its event number 1655 * instead of its name string. 1656 */ 1657 if (ddi_strtol(event, NULL, 0, &event_num) != 1658 0) { 1659 return (CPC_INVALID_EVENT); 1660 } 1661 1662 event_num = event_num & 0xFF; 1663 1664 /* 1665 * Search the event table to find out if the 1666 * event specified has an privilege 1667 * requirements. Currently none of the 1668 * pic-specific counters have any privilege 1669 * requirements. Hence only the table 1670 * cmn_gpc_events_core_uarch is searched. 1671 */ 1672 for (m = cmn_gpc_events_core_uarch; 1673 m->event_num != NT_END; 1674 m++) { 1675 if (event_num == m->event_num) { 1676 break; 1677 } 1678 } 1679 if (m->event_num == NT_END) { 1680 nt_raw.event_num = (uint8_t)event_num; 1681 n = &nt_raw; 1682 } else { 1683 n = m; 1684 } 1685 } 1686 conf.core_ctl = n->event_num; /* Event Select */ 1687 } 1688 } 1689 1690 1691 conf.core_picno = picnum; 1692 conf.core_pictype = CORE_GPC; 1693 conf.core_rawpic = preset & mask_gpc; 1694 1695 conf.core_pes = GPC_BASE_PES + picnum; 1696 conf.core_pmc = GPC_BASE_PMC + picnum; 1697 1698 for (i = 0; i < nattrs; i++) { 1699 if (strncmp(attrs[i].ka_name, "umask", 6) == 0) { 1700 if ((attrs[i].ka_val | CORE_UMASK_MASK) != 1701 CORE_UMASK_MASK) { 1702 return (CPC_ATTRIBUTE_OUT_OF_RANGE); 1703 } 1704 /* Clear out the default umask */ 1705 conf.core_ctl &= ~ (CORE_UMASK_MASK << 1706 CORE_UMASK_SHIFT); 1707 /* Use the user provided umask */ 1708 conf.core_ctl |= attrs[i].ka_val << 1709 CORE_UMASK_SHIFT; 1710 } else if (strncmp(attrs[i].ka_name, "edge", 6) == 0) { 1711 if (attrs[i].ka_val != 0) 1712 conf.core_ctl |= CORE_EDGE; 1713 } else if (strncmp(attrs[i].ka_name, "inv", 4) == 0) { 1714 if (attrs[i].ka_val != 0) 1715 conf.core_ctl |= CORE_INV; 1716 } else if (strncmp(attrs[i].ka_name, "cmask", 6) == 0) { 1717 if ((attrs[i].ka_val | CORE_CMASK_MASK) != 1718 CORE_CMASK_MASK) { 1719 return (CPC_ATTRIBUTE_OUT_OF_RANGE); 1720 } 1721 conf.core_ctl |= attrs[i].ka_val << 1722 CORE_CMASK_SHIFT; 1723 } else if (strncmp(attrs[i].ka_name, "anythr", 7) == 1724 0) { 1725 if (versionid < 3) 1726 return (CPC_INVALID_ATTRIBUTE); 1727 if (secpolicy_cpc_cpu(crgetcred()) != 0) { 1728 return (CPC_ATTR_REQUIRES_PRIVILEGE); 1729 } 1730 if (attrs[i].ka_val != 0) 1731 conf.core_ctl |= CORE_ANYTHR; 1732 } else { 1733 return (CPC_INVALID_ATTRIBUTE); 1734 } 1735 } 1736 1737 if (flags & CPC_COUNT_USER) 1738 conf.core_ctl |= CORE_USR; 1739 if (flags & CPC_COUNT_SYSTEM) 1740 conf.core_ctl |= CORE_OS; 1741 if (flags & CPC_OVF_NOTIFY_EMT) 1742 conf.core_ctl |= CORE_INT; 1743 conf.core_ctl |= CORE_EN; 1744 1745 if (versionid < 3 && k == NULL) { 1746 if (check_cpc_securitypolicy(&conf, n) != 0) { 1747 return (CPC_ATTR_REQUIRES_PRIVILEGE); 1748 } 1749 } 1750 1751 *data = kmem_alloc(sizeof (core_pcbe_config_t), KM_SLEEP); 1752 *((core_pcbe_config_t *)*data) = conf; 1753 1754 return (0); 1755 } 1756 1757 static int 1758 configure_ffc(uint_t picnum, char *event, uint64_t preset, uint32_t flags, 1759 uint_t nattrs, kcpc_attr_t *attrs, void **data) 1760 { 1761 core_pcbe_config_t *conf; 1762 uint_t i; 1763 1764 if (picnum - num_gpc >= num_ffc) { 1765 return (CPC_INVALID_PICNUM); 1766 } 1767 1768 if ((strcmp(ffc_names[picnum-num_gpc], event) != 0) && 1769 (strcmp(ffc_genericnames[picnum-num_gpc], event) != 0)) { 1770 return (CPC_INVALID_EVENT); 1771 } 1772 1773 if ((versionid < 3) && (nattrs != 0)) { 1774 return (CPC_INVALID_ATTRIBUTE); 1775 } 1776 1777 conf = kmem_alloc(sizeof (core_pcbe_config_t), KM_SLEEP); 1778 conf->core_ctl = 0; 1779 1780 for (i = 0; i < nattrs; i++) { 1781 if (strncmp(attrs[i].ka_name, "anythr", 7) == 0) { 1782 if (secpolicy_cpc_cpu(crgetcred()) != 0) { 1783 return (CPC_ATTR_REQUIRES_PRIVILEGE); 1784 } 1785 if (attrs[i].ka_val != 0) { 1786 conf->core_ctl |= CORE_FFC_ANYTHR; 1787 } 1788 } else { 1789 kmem_free(conf, sizeof (core_pcbe_config_t)); 1790 return (CPC_INVALID_ATTRIBUTE); 1791 } 1792 } 1793 1794 conf->core_picno = picnum; 1795 conf->core_pictype = CORE_FFC; 1796 conf->core_rawpic = preset & mask_ffc; 1797 conf->core_pmc = FFC_BASE_PMC + (picnum - num_gpc); 1798 1799 /* All fixed-function counters have the same control register */ 1800 conf->core_pes = PERF_FIXED_CTR_CTRL; 1801 1802 if (flags & CPC_COUNT_USER) 1803 conf->core_ctl |= CORE_FFC_USR_EN; 1804 if (flags & CPC_COUNT_SYSTEM) 1805 conf->core_ctl |= CORE_FFC_OS_EN; 1806 if (flags & CPC_OVF_NOTIFY_EMT) 1807 conf->core_ctl |= CORE_FFC_PMI; 1808 1809 *data = conf; 1810 return (0); 1811 } 1812 1813 /*ARGSUSED*/ 1814 static int 1815 core_pcbe_configure(uint_t picnum, char *event, uint64_t preset, 1816 uint32_t flags, uint_t nattrs, kcpc_attr_t *attrs, void **data, 1817 void *token) 1818 { 1819 int ret; 1820 core_pcbe_config_t *conf; 1821 1822 /* 1823 * If we've been handed an existing configuration, we need only preset 1824 * the counter value. 1825 */ 1826 if (*data != NULL) { 1827 conf = *data; 1828 ASSERT(conf->core_pictype == CORE_GPC || 1829 conf->core_pictype == CORE_FFC); 1830 if (conf->core_pictype == CORE_GPC) 1831 conf->core_rawpic = preset & mask_gpc; 1832 else /* CORE_FFC */ 1833 conf->core_rawpic = preset & mask_ffc; 1834 return (0); 1835 } 1836 1837 if (picnum >= total_pmc) { 1838 return (CPC_INVALID_PICNUM); 1839 } 1840 1841 if (picnum < num_gpc) { 1842 ret = configure_gpc(picnum, event, preset, flags, 1843 nattrs, attrs, data); 1844 } else { 1845 ret = configure_ffc(picnum, event, preset, flags, 1846 nattrs, attrs, data); 1847 } 1848 return (ret); 1849 } 1850 1851 static void 1852 core_pcbe_program(void *token) 1853 { 1854 core_pcbe_config_t *cfg; 1855 uint64_t perf_global_ctrl; 1856 uint64_t perf_fixed_ctr_ctrl; 1857 uint64_t curcr4; 1858 1859 core_pcbe_allstop(); 1860 1861 curcr4 = getcr4(); 1862 if (kcpc_allow_nonpriv(token)) 1863 /* Allow RDPMC at any ring level */ 1864 setcr4(curcr4 | CR4_PCE); 1865 else 1866 /* Allow RDPMC only at ring 0 */ 1867 setcr4(curcr4 & ~CR4_PCE); 1868 1869 /* Clear any overflow indicators before programming the counters */ 1870 WRMSR(PERF_GLOBAL_OVF_CTRL, MASK_CONDCHGD_OVFBUFFER | control_mask); 1871 1872 cfg = NULL; 1873 perf_global_ctrl = 0; 1874 perf_fixed_ctr_ctrl = 0; 1875 cfg = (core_pcbe_config_t *)kcpc_next_config(token, cfg, NULL); 1876 while (cfg != NULL) { 1877 ASSERT(cfg->core_pictype == CORE_GPC || 1878 cfg->core_pictype == CORE_FFC); 1879 1880 if (cfg->core_pictype == CORE_GPC) { 1881 /* 1882 * General-purpose counter registers have write 1883 * restrictions where only the lower 32-bits can be 1884 * written to. The rest of the relevant bits are 1885 * written to by extension from bit 31 (all ZEROS if 1886 * bit-31 is ZERO and all ONE if bit-31 is ONE). This 1887 * makes it possible to write to the counter register 1888 * only values that have all ONEs or all ZEROs in the 1889 * higher bits. 1890 */ 1891 if (((cfg->core_rawpic & BITS_EXTENDED_FROM_31) == 0) || 1892 ((cfg->core_rawpic & BITS_EXTENDED_FROM_31) == 1893 BITS_EXTENDED_FROM_31)) { 1894 /* 1895 * Straighforward case where the higher bits 1896 * are all ZEROs or all ONEs. 1897 */ 1898 WRMSR(cfg->core_pmc, 1899 (cfg->core_rawpic & mask_gpc)); 1900 } else { 1901 /* 1902 * The high order bits are not all the same. 1903 * We save what is currently in the registers 1904 * and do not write to it. When we want to do 1905 * a read from this register later (in 1906 * core_pcbe_sample()), we subtract the value 1907 * we save here to get the actual event count. 1908 * 1909 * NOTE: As a result, we will not get overflow 1910 * interrupts as expected. 1911 */ 1912 RDMSR(cfg->core_pmc, cfg->core_rawpic); 1913 cfg->core_rawpic = cfg->core_rawpic & mask_gpc; 1914 } 1915 WRMSR(cfg->core_pes, cfg->core_ctl); 1916 perf_global_ctrl |= 1ull << cfg->core_picno; 1917 } else { 1918 /* 1919 * Unlike the general-purpose counters, all relevant 1920 * bits of fixed-function counters can be written to. 1921 */ 1922 WRMSR(cfg->core_pmc, cfg->core_rawpic & mask_ffc); 1923 1924 /* 1925 * Collect the control bits for all the 1926 * fixed-function counters and write it at one shot 1927 * later in this function 1928 */ 1929 perf_fixed_ctr_ctrl |= cfg->core_ctl << 1930 ((cfg->core_picno - num_gpc) * CORE_FFC_ATTR_SIZE); 1931 perf_global_ctrl |= 1932 1ull << (cfg->core_picno - num_gpc + 32); 1933 } 1934 1935 cfg = (core_pcbe_config_t *) 1936 kcpc_next_config(token, cfg, NULL); 1937 } 1938 1939 /* Enable all the counters */ 1940 WRMSR(PERF_FIXED_CTR_CTRL, perf_fixed_ctr_ctrl); 1941 WRMSR(PERF_GLOBAL_CTRL, perf_global_ctrl); 1942 } 1943 1944 static void 1945 core_pcbe_allstop(void) 1946 { 1947 /* Disable all the counters together */ 1948 WRMSR(PERF_GLOBAL_CTRL, ALL_STOPPED); 1949 1950 setcr4(getcr4() & ~CR4_PCE); 1951 } 1952 1953 static void 1954 core_pcbe_sample(void *token) 1955 { 1956 uint64_t *daddr; 1957 uint64_t curpic; 1958 core_pcbe_config_t *cfg; 1959 uint64_t counter_mask; 1960 1961 cfg = (core_pcbe_config_t *)kcpc_next_config(token, NULL, &daddr); 1962 while (cfg != NULL) { 1963 ASSERT(cfg->core_pictype == CORE_GPC || 1964 cfg->core_pictype == CORE_FFC); 1965 1966 curpic = rdmsr(cfg->core_pmc); 1967 1968 DTRACE_PROBE4(core__pcbe__sample, 1969 uint64_t, cfg->core_pmc, 1970 uint64_t, curpic, 1971 uint64_t, cfg->core_rawpic, 1972 uint64_t, *daddr); 1973 1974 if (cfg->core_pictype == CORE_GPC) { 1975 counter_mask = mask_gpc; 1976 } else { 1977 counter_mask = mask_ffc; 1978 } 1979 curpic = curpic & counter_mask; 1980 if (curpic >= cfg->core_rawpic) { 1981 *daddr += curpic - cfg->core_rawpic; 1982 } else { 1983 /* Counter overflowed since our last sample */ 1984 *daddr += counter_mask - (cfg->core_rawpic - curpic) + 1985 1; 1986 } 1987 cfg->core_rawpic = *daddr & counter_mask; 1988 1989 cfg = 1990 (core_pcbe_config_t *)kcpc_next_config(token, cfg, &daddr); 1991 } 1992 } 1993 1994 static void 1995 core_pcbe_free(void *config) 1996 { 1997 kmem_free(config, sizeof (core_pcbe_config_t)); 1998 } 1999 2000 static struct modlpcbe core_modlpcbe = { 2001 &mod_pcbeops, 2002 "Core Performance Counters", 2003 &core_pcbe_ops 2004 }; 2005 2006 static struct modlinkage core_modl = { 2007 MODREV_1, 2008 &core_modlpcbe, 2009 }; 2010 2011 int 2012 _init(void) 2013 { 2014 if (core_pcbe_init() != 0) { 2015 return (ENOTSUP); 2016 } 2017 return (mod_install(&core_modl)); 2018 } 2019 2020 int 2021 _fini(void) 2022 { 2023 return (mod_remove(&core_modl)); 2024 } 2025 2026 int 2027 _info(struct modinfo *mi) 2028 { 2029 return (mod_info(&core_modl, mi)); 2030 } 2031