/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/cpu_module.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#include <sys/machsystm.h>
#include <sys/machasi.h>
#include <sys/sysmacros.h>
#include <sys/callb.h>
#include <sys/archsystm.h>
#include <sys/trapstat.h>
#ifdef sun4v
#include <sys/hypervisor_api.h>
#endif
#ifndef sun4v
#include <sys/pghw.h>
#endif

/* BEGIN CSTYLED */
/*
 * trapstat:  Trap Statistics through Dynamic Trap Table Interposition
 * -------------------------------------------------------------------
 *
 * Motivation and Overview
 *
 * Despite being a fundamental indicator of system behavior, there has
 * historically been very little insight provided into the frequency and cost
 * of machine-specific traps.  The lack of insight has been especially acute
 * on UltraSPARC microprocessors:  because these microprocessors handle TLB
 * misses as software traps, the frequency and duration of traps play a
 * decisive role in the performance of the memory system.  As applications
 * have increasingly outstripped TLB reach, this has become increasingly true.
 *
 * Part of the difficulty of observing trap behavior is that the trap handlers
 * are so frequently called (e.g. millions of times per second) that any
 * permanently enabled instrumentation would induce an unacceptable
 * performance degradation.  Thus, it is a constraint on any trap
 * observability infrastructure that it have no probe effect when not
 * explicitly enabled.
 *
 * The basic idea, then, is to create an interposing trap table in which each
 * entry increments a per-trap, in-memory counter and then jumps to the
 * actual, underlying trap table entry.  To enable trapstat, we atomically
 * write to the trap base address (%tba) register to point to our interposing
 * trap table.  (Note that per-CPU statistics fall out by creating a different
 * trap table for each CPU.)
 *
 * Implementation Details
 *
 * While the idea is straight-forward, a nuance of SPARC V9 slightly
 * complicates the implementation.  Unlike its predecessors, SPARC V9 supports
 * the notion of nested traps.  The trap level is kept in the TL register:
 * during normal operation it is 0; when a trap is taken, the TL register is
 * incremented by 1.  To aid system software, SPARC V9 breaks the trap table
 * into two halves:  the lower half contains the trap handlers for traps taken
 * when TL is 0; the upper half contains the trap handlers for traps taken
 * when TL is greater than 0.  Each half is further subdivided into two
 * subsequent halves:  the lower half contains the trap handlers for traps
 * other than those induced by the trap instruction (Tcc variants); the upper
 * half contains the trap handlers for traps induced by the trap instruction.
 * This gives a total of four ranges, with each range containing 256 traps:
 *
 *       +--------------------------------+- 3ff
 *       |                                |   .
 *       |     Trap instruction, TL>0     |   .
 *       |                                |   .
 *       |- - - - - - - - - - - - - - - - +- 300
 *       |- - - - - - - - - - - - - - - - +- 2ff
 *       |                                |   .
 *       |   Non-trap instruction, TL>0   |   .
 *       |                                |   .
 *       |- - - - - - - - - - - - - - - - +- 200
 *       |- - - - - - - - - - - - - - - - +- 1ff
 *       |                                |   .
 *       |     Trap instruction, TL=0     |   .
 *       |                                |   .
 *       |- - - - - - - - - - - - - - - - +- 100
 *       |- - - - - - - - - - - - - - - - +- 0ff
 *       |                                |   .
 *       |   Non-trap instruction, TL=0   |   .
 *       |                                |   .
 *       +--------------------------------+- 000
 *
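 * (An illustrative aside, not part of the original design notes:  per the
 * SPARC V9 trap model, each trap table entry is eight instructions (32 bytes)
 * long, and the hardware forms the handler address roughly as
 *
 *	handler = %tba + ((TL > 0) ? 0x4000 : 0) + (TT << 5)
 *
 * so, for example, a trap type of 0x68 taken at TL=0 vectors to %tba + 0xd00,
 * while the same trap taken at TL>0 vectors to %tba + 0x4d00.  This is also
 * why the full table spans 32K and why the %tba must be 32K aligned, facts
 * relied upon below.)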
 *
 * Solaris, however, doesn't have reason to support trap instructions when
 * TL>0 (only privileged code may execute at TL>0; not supporting this only
 * constrains our own implementation).  The trap table actually looks like:
 *
 *       +--------------------------------+- 2ff
 *       |                                |   .
 *       |   Non-trap instruction, TL>0   |   .
 *       |                                |   .
 *       |- - - - - - - - - - - - - - - - +- 200
 *       |- - - - - - - - - - - - - - - - +- 1ff
 *       |                                |   .
 *       |     Trap instruction, TL=0     |   .
 *       |                                |   .
 *       |- - - - - - - - - - - - - - - - +- 100
 *       |- - - - - - - - - - - - - - - - +- 0ff
 *       |                                |   .
 *       |   Non-trap instruction, TL=0   |   .
 *       |                                |   .
 *       +--------------------------------+- 000
 *
 * Putatively to aid system software, SPARC V9 has the notion of multiple
 * sets of global registers.  UltraSPARC defines four sets of global
 * registers:
 *
 *       Normal Globals
 *       Alternate Globals (AGs)
 *       MMU Globals (MGs)
 *       Interrupt Globals (IGs)
 *
 * The set of globals in use is controlled by bits in PSTATE; when TL is 0
 * (and PSTATE has not been otherwise explicitly modified), the Normal Globals
 * are in use.  When a trap is issued, PSTATE is modified to point to a set of
 * globals corresponding to the trap type.  Most traps correspond to the
 * Alternate Globals, with a minority corresponding to the MMU Globals, and
 * only the interrupt-vector trap (vector 0x60) corresponding to the Interrupt
 * Globals.  (The complete mapping can be found in the UltraSPARC I&II User's
 * Manual.)
 *
 * Note that the sets of globals are per trap _type_, not per trap _level_.
 * Thus, when executing a TL>0 trap handler, one may not have registers
 * available (for example, both trap-instruction traps and spill traps execute
 * on the alternate globals; if a trap-instruction trap induces a window
 * spill, the window spill handler has no available globals).  For trapstat,
 * this is problematic:  a register is required to transfer control from one
 * arbitrary location (in the interposing trap table) to another (in the
 * actual trap table).
 *
 * We solve this problem by exploiting the trap table's location at the bottom
 * of valid kernel memory (i.e. at KERNELBASE).
 * We locate the interposing trap tables just below KERNELBASE -- thereby
 * allowing us to use a branch-always instruction (ba) instead of a jump
 * instruction (jmp) to transfer control from the TL>0 entries in the
 * interposing trap table to the TL>0 entries in the actual trap table.
 * (N.B. while this allows trap table interposition to work, it necessarily
 * limits trapstat to only recording information about TL=0 traps -- there is
 * no way to increment a counter without using a register.)  Diagrammatically:
 *
 * Actual trap table:
 *
 *       +--------------------------------+- 2ff
 *       |                                |   .
 *       |   Non-trap instruction, TL>0   |   .   <----------------------+
 *       |                                |   .   <----------------------|-+
 *       |- - - - - - - - - - - - - - - - +- 200  <----------------------|-|-+
 *       |- - - - - - - - - - - - - - - - +- 1ff                          | | |
 *       |                                |   .                           | | |
 *       |     Trap instruction, TL=0     |   .   <----------------+      | | |
 *       |                                |   .   <----------------|-+    | | |
 *       |- - - - - - - - - - - - - - - - +- 100  <----------------|-|-+  | | |
 *       |- - - - - - - - - - - - - - - - +- 0ff                   | | |  | | |
 *       |                                |   .                    | | |  | | |
 *       |   Non-trap instruction, TL=0   |   .   <----------+     | | |  | | |
 *       |                                |   .   <----------|-+   | | |  | | |
 *       +--------------------------------+- 000  <----------|-|-+ | | |  | | |
 *        KERNELBASE                                          | | | | | |  | | |
 *                                                            | | | | | |  | | |
 *                                                            | | | | | |  | | |
 * Interposing trap table:                                    | | | | | |  | | |
 *                                                            | | | | | |  | | |
 *       +--------------------------------+- 2ff              | | | | | |  | | |
 *       |  ...                           |   .               | | | | | |  | | |
 *       |  ...                           |   .               | | | | | |  | | |
 *       |  ...                           |   .               | | | | | |  | | |
 *       |- - - - - - - - - - - - - - - - +- 203              | | | | | |  | | |
 *       |  ba,a                          | -----------------|-|-|-|-|-|--+ | |
 *       |- - - - - - - - - - - - - - - - +- 202              | | | | | |    | |
 *       |  ba,a                          | -----------------|-|-|-|-|-|----+ |
 *       |- - - - - - - - - - - - - - - - +- 201              | | | | | |      |
 *       |  ba,a                          | -----------------|-|-|-|-|-|------+
 *       |- - - - - - - - - - - - - - - - +- 200              | | | | | |
 *       |  ...                           |   .               | | | | | |
 *       |  ...                           |   .               | | | | | |
 *       |  ...                           |   .               | | | | | |
 *       |- - - - - - - - - - - - - - - - +- 103              | | | | | |
 *       |  (Increment counter)           |                   | | | | | |
 *       |  ba,a                          | -----------------|-|-|-+ | |
 *       |- - - - - - - - - - - - - - - - +- 102              | | |   | |
 *       |  (Increment counter)           |                   | | |   | |
 *       |  ba,a                          | -----------------|-|-|---+ |
 *       |- - - - - - - - - - - - - - - - +- 101              | | |     |
 *       |  (Increment counter)           |                   | | |     |
 *       |  ba,a                          | -----------------|-|-|-----+
 *       |- - - - - - - - - - - - - - - - +- 100              | | |
 *       |  ...                           |   .               | | |
 *       |  ...                           |   .               | | |
 *       |  ...                           |   .               | | |
 *       |- - - - - - - - - - - - - - - - +- 003              | | |
 *       |  (Increment counter)           |                   | | |
 *       |  ba,a                          | -----------------+ | |
 *       |- - - - - - - - - - - - - - - - +- 002                | |
 *       |  (Increment counter)           |                     | |
 *       |  ba,a                          | -------------------+ |
 *       |- - - - - - - - - - - - - - - - +- 001                  |
 *       |  (Increment counter)           |                       |
 *       |  ba,a                          | ---------------------+
 *       +--------------------------------+- 000
 *        KERNELBASE - tstat_total_size
 *
 * tstat_total_size is the total size of each trap table (data pages plus
 * instruction pages).  It must be true that the interposing trap table (at
 * KERNELBASE - tstat_total_size) lies within the maximum branch displacement
 * of the actual trap table (at KERNELBASE); if each CPU were to consume a
 * disjoint virtual range below KERNELBASE for its trap table, we could
 * support at most (maximum_branch_displacement / tstat_total_size) CPUs.
 * The maximum branch displacement for Bicc variants is just under eight
 * megabytes, and (because the %tba must be 32K aligned), tstat_total_size
 * must be at least 32K; if each CPU were to consume a disjoint virtual range,
 * we would have an unacceptably low upper bound of 256 CPUs.
 *
 * While there are tricks that one could use to address this constraint
 * (e.g., creating trampolines every maximum_branch_displacement bytes), we
 * instead solve this by not permitting each CPU to consume a disjoint
 * virtual range.  Rather, we have each CPU's interposing trap table use the
 * _same_ virtual range, but we back the trap tables with disjoint physical
 * memory.  Normally, such one-to-many virtual-to-physical mappings are
 * illegal; this is permissible here only because the pages for the
 * interposing trap table are necessarily locked in the TLB.  (The CPUs thus
 * never have the opportunity to discover that they have conflicting
 * translations.)
 *
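 * (To make the arithmetic concrete -- an illustrative calculation, not code
 * from the driver:  with a Bicc displacement of just under eight megabytes
 * and a 32K minimum table size,
 *
 *	maximum_branch_displacement / tstat_total_size ~= 0x800000 / 0x8000
 *	                                                = 256 CPUs
 *
 * which is the unacceptably low bound cited above.)
 *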
 * On CMT architectures in which CPUs can share MMUs, the above trick will not
 * work:  two CPUs that share an MMU cannot have the same virtual address map
 * to disjoint physical pages.  On these architectures, any CPUs sharing the
 * same MMU must consume a disjoint 32K virtual address range -- limiting the
 * number of CPUs sharing an MMU on these architectures to 256 due to the
 * branch displacement limitation described above.  On the sun4v architecture,
 * there is a further limitation:  a guest may not have more than eight locked
 * TLB entries per MMU.  To allow operation under this restriction, the
 * interposing trap table and the trap statistics are each accessed through
 * a single 4M TLB entry.  This limits the footprint to two locked entries
 * (one for the I-TLB and one for the D-TLB), but further restricts the number
 * of CPUs to 128 per MMU.  However, support for more than 128 CPUs can easily
 * be added via a hybrid scheme, where the same 4M virtual address is used
 * on different MMUs.
 *
 * On the sun4v architecture, we currently don't use the hybrid scheme, as it
 * imposes additional restrictions on live migration and transparent CPU
 * replacement.  Instead, we increase the number of supported CPUs by reducing
 * the per-CPU virtual address space requirements via a shared interposing
 * trap table, as follows:
 *
 *                                           Offset (within 4MB page)
 *       +------------------------------------+- 0x400000
 *       |  CPU 507 trap statistics (8KB)     |   .
 *       |- - - - - - - - - - - - - - - - - - +- 0x3fe000
 *       |                                    |
 *       |           ...                      |
 *       |                                    |
 *       |- - - - - - - - - - - - - - - - - - +- 0x00c000
 *       |  CPU 1 trap statistics (8KB)       |   .
 *       |- - - - - - - - - - - - - - - - - - +- 0x00a000
 *       |  CPU 0 trap statistics (8KB)       |   .
 *       |- - - - - - - - - - - - - - - - - - +- 0x008000
 *       |  Shared trap handler continuation  |   .
 *       |- - - - - - - - - - - - - - - - - - +- 0x006000
 *       |  Non-trap instruction, TL>0        |   .
 *       |- - - - - - - - - - - - - - - - - - +- 0x004000
 *       |  Trap instruction, TL=0            |   .
 *       |- - - - - - - - - - - - - - - - - - +- 0x002000
 *       |  Non-trap instruction, TL=0        |   .
 *       +------------------------------------+- 0x000000
 *
 * Note that each CPU has its own 8K space for its trap statistics but
 * shares the same interposing trap handlers.  Interposing trap handlers
 * use the CPU ID to determine the location of the per-CPU trap statistics
 * area dynamically.  This increases the interposing trap handler overhead,
 * but is acceptable as it allows us to support up to 508 CPUs with one
 * 4MB page on the sun4v architecture.  Support for additional CPUs can be
 * added via the hybrid scheme mentioned earlier.
 *
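 * (Illustrative arithmetic for the layout above -- not literal driver code:
 * with the shared interposing handlers occupying the first 0x8000 bytes of
 * the 4M page and each CPU owning an 8K statistics area, a CPU's statistics
 * live at
 *
 *	stats_offset(cpuid) = 0x8000 + (cpuid * 0x2000)
 *
 * so CPU 0's area begins at offset 0x008000 and CPU 507's at
 * 0x8000 + 507 * 0x2000 = 0x3fe000, matching the diagram.)
 *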
 * TLB Statistics
 *
 * Because TLB misses are an important component of system performance, we
 * wish to know much more about these traps than simply the number received.
 * Specifically, we wish to know:
 *
 * (a) The amount of time spent executing the TLB miss handler
 * (b) TLB misses versus TSB misses
 * (c) Kernel-level misses versus user-level misses
 * (d) Misses per pagesize
 *
 * TLB Statistics:  Time Spent Executing
 *
 * To accurately determine the amount of time spent executing the TLB miss
 * handler, one must get a timestamp on trap entry and trap exit, subtract the
 * latter from the former, and add the result to an accumulating count.
 * Consider flow of control during normal TLB miss processing (where "ldx
 * [%g2], %g2" is an arbitrary TLB-missing instruction):
 *
 *   + - - - - - - - -+
 *   :                :
 *   : ldx [%g2], %g2 :<-------------------------------------------------------+
 *   :                :   Return from trap:                                     |
 *   + - - - - - - - -+     TL <- TL - 1 (0)                                    |
 *           |               %pc <- TSTATE[TL].TPC (address of load)            |
 *           |  TLB miss:                                                       |
 *           |    TL <- TL + 1 (1)                                              |
 *           |    %pc <- TLB-miss-trap-handler                                  |
 *           |                                                                  |
 *           v                                                                  |
 *   + - - - - - - - - - - - - - - - +                                          |
 *   :                               :                                          |
 *   :  Lookup VA in TSB             :                                          |
 *   :  If (hit)                     :                                          |
 *   :    Fill TLB                   :                                          |
 *   :  Else                         :                                          |
 *   :    Lookup VA (hme hash table  :                                          |
 *   :     or segkpm)                :                                          |
 *   :    Fill TLB                   :                                          |
 *   :  Endif                        :                                          |
 *   :  Issue "retry"  ---------------------------------------------------------+
 *   :                               :
 *   + - - - - - - - - - - - - - - - +
 *    TLB-miss-trap-handler
 *
 *
 * As the above diagram indicates, interposing on the trap table allows one
 * only to determine a timestamp on trap _entry_:  when the TLB miss handler
 * has completed filling the TLB, a "retry" will be issued, and control will
 * transfer immediately back to the missing %pc.
 *
 * To obtain a timestamp on trap exit, we must then somehow interpose between
 * the "retry" and the subsequent control transfer to the TLB-missing
 * instruction.  To do this, we _push_ a trap level.  The basic idea is to
 * spoof a TLB miss by raising TL, setting the %tpc to be within text
 * controlled by trapstat (the "TLB return entry") and branching to the
 * underlying TLB miss handler.  When the TLB miss handler issues its "retry",
 * control will transfer not to the TLB-missing instruction, but rather to the
 * TLB return entry.  This code can then obtain a timestamp, and issue its own
 * "retry" -- thereby correctly returning to the TLB-missing instruction.
 * Here is the above TLB miss flow control diagram modified to reflect
 * trapstat's operation:
 *
 *   + - - - - - - - -+
 *   :                :
 *   : ldx [%g2], %g2 :<-------------------------------------------------------+
 *   :                :   Return from trap:                                     |
 *   + - - - - - - - -+     TL <- TL - 1 (0)                                    |
 *           |               %pc <- TSTATE[TL].TPC (address of load)            |
 *           |  TLB miss:                                                       |
 *           |    TL <- TL + 1 (1)                                              |
 *           |    %pc <- TLB-miss-trap-handler (trapstat)                       |
 *           |                                                                  |
 *           v                      TLB-return-entry (trapstat)                 |
 *   + - - - - - - - - - - - - - - - - - - +    + - - - - - - - - - - - - - +   |
 *   :                                     :    :                           :   |
 *   :  Record timestamp                   :    : Record timestamp          :   |
 *   :  TL <- 2                            :    : Take timestamp difference :   |
 *   :  TSTATE[1].TPC <- TLB-return-entry  :    : Add to running total      :   |
 *   :  ba,a TLB-miss-trap-handler  -----------+: Issue "retry"  ---------------+
 *   :                                     :   |:                           :
 *   + - - - - - - - - - - - - - - - - - - +   |+ - - - - - - - - - - - - - +
 *    TLB-miss-trap-handler                    |             ^
 *    (trapstat)                               |             |
 *                                             |             |
 *                   +-------------------------+             |
 *                   |                                        |
 *                   |                                        |
 *                   v                                        |
 *   + - - - - - - - - - - - - - - - +                        |
 *   :                               :                        |
 *   :  Lookup VA in TSB             :                        |
 *   :  If (hit)                     :                        |
 *   :    Fill TLB                   :                        |
 *   :  Else                         :                        |
 *   :    Lookup VA (hme hash table  :                        |
 *   :     or segkpm)                :                        |
 *   :    Fill TLB                   :                        |
 *   :  Endif                        :                        |
 *   :  Issue "retry"  ---------------------------------------+
 *   :                               :   Return from trap:
 *   + - - - - - - - - - - - - - - - +     TL <- TL - 1 (1)
 *    TLB-miss-trap-handler                %pc <- TSTATE[TL].TPC (TLB-return-entry)
 *
 *
 * A final subterfuge is required to complete our artifice:  if we miss in
 * the TLB, the TSB _and_ the subsequent hash or segkpm lookup (that is, if
 * there is no valid translation for the TLB-missing address), common system
 * software will need to accurately determine the %tpc as part of its page
 * fault handling.  We therefore modify the kernel to check the %tpc in this
 * case:  if the %tpc falls within the VA range controlled by trapstat and
 * the TL is 2, TL is simply lowered back to 1 (this check is implemented
 * by the TSTAT_CHECK_TL1 macro).  Lowering TL to 1 has the effect of
 * discarding the state pushed by trapstat.
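 *
 * (In outline, the accounting performed by the two pieces of trapstat text
 * is therefore -- as a sketch, not the literal instruction sequence:
 *
 *	interposing TLB miss handler:   tdata_tmptick = rd %tick
 *	TLB return entry:               delta        = rd %tick - tdata_tmptick
 *	                                tmiss_time  += delta
 *	                                tmiss_count += 1
 *
 * The actual instruction sequence appears in trapstat_tlbretent(), below.)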
 *
 * TLB Statistics:  TLB Misses versus TSB Misses
 *
 * Distinguishing TLB misses from TSB misses requires further interposition
 * on the TLB miss handler:  we cannot know a priori or a posteriori if a
 * given VA will or has hit in the TSB.
 *
 * We achieve this distinction by adding a second TLB return entry almost
 * identical to the first -- differing only in the address to which it
 * stores its results.  We then modify the TLB miss handlers of the kernel
 * such that they check the %tpc when they determine that a TLB miss has
 * subsequently missed in the TSB:  if the %tpc lies within trapstat's VA
 * range and TL is 2 (that is, if trapstat is running), the TLB miss handler
 * _increments_ the %tpc by the size of the TLB return entry.  The ensuing
 * "retry" will thus transfer control to the second TLB return entry, and
 * the time spent in the handler will be accumulated in a memory location
 * specific to TSB misses.
 *
 * N.B.:  To minimize the amount of knowledge the kernel must have of
 * trapstat, we do not allow the kernel to hard-code the size of the TLB
 * return entry.  Rather, the actual tsbmiss handler executes a known
 * instruction at the corresponding tsbmiss patch points (see the
 * tstat_tsbmiss_patch_table) with the %tpc in %g7:  when trapstat is not
 * running, these points contain the harmless TSTAT_TSBMISS_INSTR instruction
 * ("add %g7, 0, %g7").  Before running, trapstat modifies the instructions
 * at these patch points such that the simm13 equals the size of the TLB
 * return entry.
 *
 * TLB Statistics:  Kernel-level Misses versus User-level Misses
 *
 * Differentiating user-level misses from kernel-level misses employs a
 * similar technique, but is simplified by the ability to distinguish a
 * user-level miss from a kernel-level miss a priori by reading the context
 * register:  we implement kernel-/user-level differentiation by again
 * doubling the number of TLB return entries, and setting the %tpc to the
 * appropriate TLB return entry in trapstat's TLB miss handler.  Together with
 * the doubling of entries required for TLB-miss/TSB-miss differentiation,
 * this yields a total of four TLB return entries:
 *
 *	Level		TSB hit?	Structure member
 *	------------------------------------------------------------
 *	Kernel		Yes		tstat_tlbret_t.ttlbr_ktlb
 *	Kernel		No		tstat_tlbret_t.ttlbr_ktsb
 *	User		Yes		tstat_tlbret_t.ttlbr_utlb
 *	User		No		tstat_tlbret_t.ttlbr_utsb
 *
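 * (Schematically -- a sketch of the selection, not the literal handler code:
 * trapstat's TLB miss handler picks the kernel or user return entry up front
 * based on the context register,
 *
 *	%tpc = (context == KCONTEXT) ? &ttlbr_ktlb : &ttlbr_utlb
 *
 * and, should the lookup subsequently miss in the TSB, the patched tsbmiss
 * handler advances the %tpc by the size of one TLB return entry, converting
 * ktlb into ktsb or utlb into utsb.)
 *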
 * TLB Statistics:  Misses per Pagesize
 *
 * As with the TLB-/TSB-miss differentiation, we have no way of determining
 * pagesize a priori.  This is therefore implemented by mandating a new rule:
 * whenever the kernel fills the TLB in its TLB miss handler, the TTE
 * corresponding to the TLB-missing VA must be in %g5 when the handler
 * executes its "retry".  This allows the TLB return entry to determine
 * pagesize by simply looking at the pagesize field in the TTE stored in
 * %g5.
 *
 * TLB Statistics:  Probe Effect
 *
 * As one might imagine, gathering TLB statistics by pushing a trap level
 * induces significant probe effect.  To account for this probe effect,
 * trapstat attempts to observe it by executing a code sequence with a known
 * number of TLB misses both before and after interposing on the trap table.
 * This allows trapstat to determine a per-trap probe effect which can then be
 * factored into the "%tim" fields of the trapstat command.
 *
 * Note that on sun4v platforms, TLB misses are normally handled by the
 * hypervisor or the hardware TSB walker.  Thus no fast MMU miss information
 * is reported for normal operation.  However, when trapstat is invoked
 * with the -t or -T option to collect detailed TLB statistics, the kernel
 * takes over TLB miss handling.  This results in significantly more overhead,
 * and TLB statistics may not be as accurate as on sun4u platforms.
 * On some processors, the hypervisor or hardware may provide a low-overhead
 * interface to collect TSB hit statistics.  This support is exposed via
 * a well-defined CPU module interface (cpu_trapstat_conf to enable this
 * interface and cpu_trapstat_data to get detailed TSB hit statistics).
 * In this scenario, TSB miss statistics are collected by intercepting the
 * IMMU_miss and DMMU_miss traps using the above-mentioned trap interposition
 * approach.
 *
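 * (The probe-effect adjustment described above is simple arithmetic -- a
 * sketch, not the literal code:  if "before" and "after" are the best-case
 * times for the known-miss loop without and with the interposing trap table,
 * then the per-trap probe effect is
 *
 *	tdata_peffect = (after - before) / TSTAT_PROBE_NPAGES
 *
 * which the trapstat command can then factor into its reported "%tim"
 * fields; see trapstat_probe(), below.)
 *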
 * Locking
 *
 * The implementation uses two locks:  tstat_lock (a local lock) and the
 * global cpu_lock.  tstat_lock is used to assure trapstat's consistency in
 * the presence of multithreaded /dev/trapstat consumers (while as of this
 * writing the only consumer of /dev/trapstat is single threaded, it is
 * obviously necessary to correctly support multithreaded access).  cpu_lock
 * is held whenever CPUs are being manipulated directly, to prevent them from
 * disappearing in the process.  Because trapstat's DR callback
 * (trapstat_cpu_setup()) must grab tstat_lock and is called with cpu_lock
 * held, the lock ordering is necessarily cpu_lock before tstat_lock.
 *
 */
/* END CSTYLED */

static dev_info_t	*tstat_devi;	/* saved in xxattach() for xxinfo() */
static int		tstat_open;	/* set if driver is open */
static kmutex_t		tstat_lock;	/* serialize access */
static vmem_t		*tstat_arena;	/* arena for TLB-locked pages */
static tstat_percpu_t	*tstat_percpu;	/* per-CPU data */
static int		tstat_running;	/* set if trapstat is running */
static tstat_data_t	*tstat_buffer;	/* staging buffer for outgoing data */
static int		tstat_options;	/* bit-wise indication of options */
static int		*tstat_enabled;	/* map of enabled trap entries */
static int		tstat_tsbmiss_patched; /* tsbmiss patch flag */
static callb_id_t	tstat_cprcb;	/* CPR callback */
static char		*tstat_probe_area; /* VA range used for probe effect */
static caddr_t		tstat_probe_phys; /* physical to back above VA */
static hrtime_t		tstat_probe_time; /* time spent on probe effect */
static hrtime_t		tstat_probe_before[TSTAT_PROBE_NLAPS];
static hrtime_t		tstat_probe_after[TSTAT_PROBE_NLAPS];
static uint_t		tstat_pgszs;		/* # of kernel page sizes */
static uint_t		tstat_user_pgszs;	/* # of user page sizes */

/*
 * sizeof tstat_data_t + pgsz data for the kernel.  For simplicity's sake, when
 * we collect data, we do it based upon szc, but when we report data back to
 * userland, we have to do it based upon the userszc which may not match.
 * So, these two variables are for internal use and exported use respectively.
 */
static size_t		tstat_data_t_size;
static size_t		tstat_data_t_exported_size;

#ifndef sun4v

static size_t		tstat_data_pages;  /* number of pages of tstat data */
static size_t		tstat_data_size;   /* tstat data size in bytes */
static size_t		tstat_total_pages; /* #data pages + #instr pages */
static size_t		tstat_total_size;  /* tstat data size + instr size */

#else /* sun4v */

static caddr_t		tstat_va;	/* VA of memory reserved for TBA */
static pfn_t		tstat_pfn;	/* PFN of memory reserved for TBA */
static boolean_t	tstat_fast_tlbstat = B_FALSE;
static int		tstat_traptab_initialized;

#endif /* sun4v */

/*
 * In the above block comment, see "TLB Statistics: TLB Misses versus
 * TSB Misses" for an explanation of the tsbmiss patch points.
 */
extern uint32_t	tsbmiss_trapstat_patch_point;
extern uint32_t	tsbmiss_trapstat_patch_point_kpm;
extern uint32_t	tsbmiss_trapstat_patch_point_kpm_small;

/*
 * Trapstat tsbmiss patch table
 */
tstat_tsbmiss_patch_entry_t tstat_tsbmiss_patch_table[] = {
	{(uint32_t *)&tsbmiss_trapstat_patch_point, 0},
	{(uint32_t *)&tsbmiss_trapstat_patch_point_kpm, 0},
	{(uint32_t *)&tsbmiss_trapstat_patch_point_kpm_small, 0},
	{(uint32_t *)NULL, 0}
};

/*
 * We define some general SPARC-specific constants to allow more readable
 * relocations.
 */
#define	NOP	0x01000000
#define	HI22(v)			((uint32_t)(v) >> 10)
#define	LO10(v)			((uint32_t)(v) & 0x3ff)
#define	LO12(v)			((uint32_t)(v) & 0xfff)
#define	DISP22(from, to) \
	((((uintptr_t)(to) - (uintptr_t)(from)) >> 2) & 0x3fffff)
#define	ASI(asi)		((asi) << 5)

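/*
 * For example (illustrative only):  "sethi %hi(v), %rd" loads its 22-bit
 * immediate into bits 31:10 of %rd and "or %rd, %lo(v), %rd" supplies the
 * low 10 bits, so for a hypothetical 32-bit value v = 0x12345678,
 *
 *	HI22(v) = 0x48d15,  LO10(v) = 0x278,
 *	(HI22(v) << 10) | LO10(v) == 0x12345678
 *
 * which is the property the instruction relocations below rely upon.
 */
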
/*
 * The interposing trap table must be locked in the I-TLB, and any data
 * referred to in the interposing trap handler must be locked in the D-TLB.
 * This function locks these pages in the appropriate TLBs by creating TTEs
 * from whole cloth, and manually loading them into the TLB.  This function is
 * called from cross call context.
 *
 * On sun4v platforms, we use 4M page size mappings to minimize the number
 * of locked down entries (i.e. permanent mappings).  Each CPU uses a
 * reserved portion of that 4M page for its TBA and data.
 */
static void
trapstat_load_tlb(void)
{
#ifndef sun4v
	int i;
#else
	uint64_t ret;
#endif
	tte_t tte;
	tstat_percpu_t *tcpu = &tstat_percpu[CPU->cpu_id];
	caddr_t va = tcpu->tcpu_vabase;

	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
	ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED));

#ifndef sun4v
	for (i = 0; i < tstat_total_pages; i++, va += MMU_PAGESIZE) {
		tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) |
		    TTE_PFN_INTHI(tcpu->tcpu_pfn[i]);
		if (i < TSTAT_INSTR_PAGES) {
			tte.tte_intlo = TTE_PFN_INTLO(tcpu->tcpu_pfn[i]) |
			    TTE_LCK_INT | TTE_CP_INT | TTE_PRIV_INT;
			sfmmu_itlb_ld_kva(va, &tte);
		} else {
			tte.tte_intlo = TTE_PFN_INTLO(tcpu->tcpu_pfn[i]) |
			    TTE_LCK_INT | TTE_CP_INT | TTE_CV_INT |
			    TTE_PRIV_INT | TTE_HWWR_INT;
			sfmmu_dtlb_ld_kva(va, &tte);
		}
	}
#else /* sun4v */
	tte.tte_inthi = TTE_VALID_INT | TTE_PFN_INTHI(tstat_pfn);
	tte.tte_intlo = TTE_PFN_INTLO(tstat_pfn) | TTE_CP_INT |
	    TTE_CV_INT | TTE_PRIV_INT | TTE_HWWR_INT |
	    TTE_SZ_INTLO(TTE4M);
	ret = hv_mmu_map_perm_addr(va, KCONTEXT, *(uint64_t *)&tte,
	    MAP_ITLB | MAP_DTLB);

	if (ret != H_EOK)
		cmn_err(CE_PANIC, "trapstat: cannot map new TBA "
		    "for cpu %d  (error: 0x%lx)", CPU->cpu_id, ret);
#endif /* sun4v */
}

/*
 * As mentioned in the "TLB Statistics: TLB Misses versus TSB Misses" section
 * of the block comment, TLB misses are differentiated from TSB misses in
 * part by hot-patching the instructions at the tsbmiss patch points (see
 * tstat_tsbmiss_patch_table).  This routine is used both to initially patch
 * the instructions, and to patch them back to their original values upon
 * restoring the original trap table.
 */
static void
trapstat_hotpatch()
{
	uint32_t instr;
	uint32_t simm13;
	tstat_tsbmiss_patch_entry_t *ep;

	ASSERT(MUTEX_HELD(&tstat_lock));

	if (!(tstat_options & TSTAT_OPT_TLBDATA))
		return;

	if (!tstat_tsbmiss_patched) {
		/*
		 * We haven't patched the TSB paths; do so now.
		 */
		/*CONSTCOND*/
		ASSERT(offsetof(tstat_tlbret_t, ttlbr_ktsb) -
		    offsetof(tstat_tlbret_t, ttlbr_ktlb) ==
		    offsetof(tstat_tlbret_t, ttlbr_utsb) -
		    offsetof(tstat_tlbret_t, ttlbr_utlb));

		simm13 = offsetof(tstat_tlbret_t, ttlbr_ktsb) -
		    offsetof(tstat_tlbret_t, ttlbr_ktlb);

		for (ep = tstat_tsbmiss_patch_table; ep->tpe_addr; ep++) {
			ASSERT(ep->tpe_instr == 0);
			instr = ep->tpe_instr = *ep->tpe_addr;

			/*
			 * Assert that the instruction we're about to patch is
			 * "add %g7, 0, %g7" (0x8e01e000).
			 */
			ASSERT(instr == TSTAT_TSBMISS_INSTR);

			instr |= simm13;
			hot_patch_kernel_text((caddr_t)ep->tpe_addr,
			    instr, sizeof (instr));
		}

		tstat_tsbmiss_patched = 1;

	} else {
		/*
		 * Remove patches from the TSB paths.
		 */
		for (ep = tstat_tsbmiss_patch_table; ep->tpe_addr; ep++) {
			ASSERT(ep->tpe_instr == TSTAT_TSBMISS_INSTR);
			hot_patch_kernel_text((caddr_t)ep->tpe_addr,
			    ep->tpe_instr, sizeof (instr));
			ep->tpe_instr = 0;
		}

		tstat_tsbmiss_patched = 0;
	}
}

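/*
 * (Encoding note, for illustration only:  TSTAT_TSBMISS_INSTR, 0x8e01e000,
 * is "add %g7, 0, %g7" with the immediate bit set and a simm13 field of
 * zero, so ORing a small positive simm13 into the instruction -- as
 * trapstat_hotpatch() does above -- yields "add %g7, simm13, %g7" without
 * disturbing any other field of the instruction.)
 */
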
/*
 * This is the routine executed to clock the performance of the trap table,
 * executed both before and after interposing on the trap table to attempt to
 * determine probe effect.  The probe effect is used to adjust the "%tim"
 * fields of trapstat's -t and -T output; we only use TLB misses to clock the
 * trap table.  We execute the inner loop (which is designed to exceed the
 * TLB's reach) nlaps times, taking the best time as our time (thereby
 * factoring out the effects of interrupts, cache misses or other perturbing
 * events).
 */
static hrtime_t
trapstat_probe_laps(int nlaps, hrtime_t *buf)
{
	int i, j = 0;
	hrtime_t ts, best = INT64_MAX;

	while (nlaps--) {
		ts = rdtick();

		for (i = 0; i < TSTAT_PROBE_SIZE; i += MMU_PAGESIZE)
			*((volatile char *)&tstat_probe_area[i]);

		if ((ts = rdtick() - ts) < best)
			best = ts;
		buf[j++] = ts;
	}

	return (best);
}

/*
 * This routine determines the probe effect by calling trapstat_probe_laps()
 * both without and with the interposing trap table.  Note that this is
 * called from a cross call on the desired CPU, and that it is called on
 * every CPU (this is necessary because the probe effect may differ from
 * one CPU to another).
 */
static void
trapstat_probe()
{
	tstat_percpu_t *tcpu = &tstat_percpu[CPU->cpu_id];
	hrtime_t before, after;

	if (!(tcpu->tcpu_flags & TSTAT_CPU_SELECTED))
		return;

	if (tstat_probe_area == NULL || (tstat_options & TSTAT_OPT_NOGO))
		return;

	/*
	 * We very much expect the %tba to be KERNELBASE; this is a
	 * precautionary measure to assure that trapstat doesn't melt the
	 * machine should the %tba point unexpectedly elsewhere.
	 */
	if (get_tba() != (caddr_t)KERNELBASE)
		return;

	/*
	 * Preserve this CPU's data before destroying it by enabling the
	 * interposing trap table.  We can safely use tstat_buffer because
	 * the caller of the trapstat_probe() cross call is holding tstat_lock.
	 */
	bcopy(tcpu->tcpu_data, tstat_buffer, tstat_data_t_size);

	tstat_probe_time = gethrtime();

	before = trapstat_probe_laps(TSTAT_PROBE_NLAPS, tstat_probe_before);
	(void) set_tba(tcpu->tcpu_ibase);

	after = trapstat_probe_laps(TSTAT_PROBE_NLAPS, tstat_probe_after);
	(void) set_tba((caddr_t)KERNELBASE);

	tstat_probe_time = gethrtime() - tstat_probe_time;

	bcopy(tstat_buffer, tcpu->tcpu_data, tstat_data_t_size);
	tcpu->tcpu_data->tdata_peffect = (after - before) / TSTAT_PROBE_NPAGES;
}

static void
trapstat_probe_alloc()
{
	pfn_t pfn;
	caddr_t va;
	int i;

	ASSERT(MUTEX_HELD(&tstat_lock));
	ASSERT(tstat_probe_area == NULL);
	ASSERT(tstat_probe_phys == NULL);

	if (!(tstat_options & TSTAT_OPT_TLBDATA))
		return;

	/*
	 * Grab some virtual from the heap arena.
	 */
	tstat_probe_area = vmem_alloc(heap_arena, TSTAT_PROBE_SIZE, VM_SLEEP);
	va = tstat_probe_area;

	/*
	 * Grab a single physical page.
	 */
	tstat_probe_phys = vmem_alloc(tstat_arena, MMU_PAGESIZE, VM_SLEEP);
	pfn = hat_getpfnum(kas.a_hat, tstat_probe_phys);

	/*
	 * Now set the translation for every page in our virtual range
	 * to be our allocated physical page.
	 */
	for (i = 0; i < TSTAT_PROBE_NPAGES; i++) {
		hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn, PROT_READ,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
		va += MMU_PAGESIZE;
	}
}

static void
trapstat_probe_free()
{
	caddr_t va;
	int i;

	ASSERT(MUTEX_HELD(&tstat_lock));

	if ((va = tstat_probe_area) == NULL)
		return;

	for (i = 0; i < TSTAT_PROBE_NPAGES; i++) {
		hat_unload(kas.a_hat, va, MMU_PAGESIZE, HAT_UNLOAD_UNLOCK);
		va += MMU_PAGESIZE;
	}

	vmem_free(tstat_arena, tstat_probe_phys, MMU_PAGESIZE);
	vmem_free(heap_arena, tstat_probe_area, TSTAT_PROBE_SIZE);

	tstat_probe_phys = NULL;
	tstat_probe_area = NULL;
}

/*
 * This routine actually enables a CPU by setting its %tba to be the
 * CPU's interposing trap table.  It is called out of cross call context.
 */
static void
trapstat_enable()
{
	tstat_percpu_t *tcpu = &tstat_percpu[CPU->cpu_id];

	if (!(tcpu->tcpu_flags & TSTAT_CPU_SELECTED))
		return;

	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
	ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED));

	if (get_tba() != (caddr_t)KERNELBASE)
		return;

	if (!(tstat_options & TSTAT_OPT_NOGO))
		(void) set_tba(tcpu->tcpu_ibase);
	tcpu->tcpu_flags |= TSTAT_CPU_ENABLED;
#ifdef sun4v
	if ((tstat_options & TSTAT_OPT_TLBDATA) &&
	    !(tstat_options & TSTAT_OPT_NOGO)) {
		if (tstat_fast_tlbstat) {
			/*
			 * Invoke processor specific interface to enable
			 * collection of TSB hit statistics.
			 */
			cpu_trapstat_conf(CPU_TSTATCONF_ENABLE);
		} else {
			/*
			 * Collect TLB miss statistics by taking over
			 * TLB miss handling from the hypervisor.  This
			 * is done by telling the hypervisor that there
			 * is no TSB configured.  Also set TSTAT_TLB_STATS
			 * flag so that no user TSB is configured during
			 * context switch time.
			 */
			cpu_t *cp = CPU;

			cp->cpu_m.cpu_tstat_flags |= TSTAT_TLB_STATS;
			(void) hv_set_ctx0(NULL, NULL);
			(void) hv_set_ctxnon0(NULL, NULL);
		}
	}
#endif
}

/*
 * This routine disables a CPU (vis a vis trapstat) by setting its %tba to be
 * the actual, underlying trap table.  It is called out of cross call context.
 */
static void
trapstat_disable()
{
	tstat_percpu_t *tcpu = &tstat_percpu[CPU->cpu_id];

	if (!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED))
		return;

	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED);
	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);

	if (!(tstat_options & TSTAT_OPT_NOGO))
		(void) set_tba((caddr_t)KERNELBASE);

	tcpu->tcpu_flags &= ~TSTAT_CPU_ENABLED;

#ifdef sun4v
	if ((tstat_options & TSTAT_OPT_TLBDATA) &&
	    !(tstat_options & TSTAT_OPT_NOGO)) {
		if (tstat_fast_tlbstat) {
			/*
			 * Invoke processor specific interface to disable
			 * collection of TSB hit statistics on each processor.
			 */
			cpu_trapstat_conf(CPU_TSTATCONF_DISABLE);
		} else {
			/*
			 * As part of collecting TLB miss statistics, we took
			 * over TLB miss handling from the hypervisor by
			 * telling the hypervisor that NO TSB is configured.
			 * We need to restore that by communicating proper
			 * kernel/user TSB information so that TLB misses
			 * can be handled by the hypervisor or the hardware
			 * more efficiently.
			 *
			 * We restore kernel TSB information right away.
			 * However, to minimize any locking dependency, we
			 * don't restore user TSB information right away.
			 * Instead, we simply clear the TSTAT_TLB_STATS flag
			 * so that the user TSB information is automatically
			 * restored on next context switch.
			 *
			 * Note that the call to restore kernel TSB information
			 * will normally not fail, unless wrong information is
			 * passed here.  In that scenario, the system will
			 * still continue to function properly, with the
			 * exception of the kernel handling all the TLB misses.
			 */
			struct hv_tsb_block *hvbp = &ksfmmup->sfmmu_hvblock;
			cpu_t *cp = CPU;

			cp->cpu_m.cpu_tstat_flags &= ~TSTAT_TLB_STATS;
			(void) hv_set_ctx0(hvbp->hv_tsb_info_cnt,
			    hvbp->hv_tsb_info_pa);
		}
	}
#endif
}

/*
 * We use %tick as the time base when recording the time spent executing
 * the trap handler.  %tick, however, is not necessarily kept in sync
 * across CPUs (indeed, different CPUs may have different %tick frequencies).
 * We therefore cross call onto a CPU to get a snapshot of its data to
 * copy out; this is the routine executed out of that cross call.
 */
static void
trapstat_snapshot()
{
	tstat_percpu_t *tcpu = &tstat_percpu[CPU->cpu_id];
	tstat_data_t *data = tcpu->tcpu_data;

	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED);
	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ENABLED);

	data->tdata_snapts = gethrtime();
	data->tdata_snaptick = rdtick();
	bcopy(data, tstat_buffer, tstat_data_t_size);
#ifdef sun4v
	/*
	 * Invoke processor specific interface to collect TSB hit
	 * statistics on each processor.
	 */
	if ((tstat_options & TSTAT_OPT_TLBDATA) && tstat_fast_tlbstat)
		cpu_trapstat_data((void *) tstat_buffer->tdata_pgsz,
		    tstat_pgszs);
#endif
}

/*
 * The TSTAT_RETENT_* constants define offsets in the TLB return entry.
 * They are used only in trapstat_tlbretent() (below) and #undef'd
 * immediately afterwards.  Any change to "retent" in trapstat_tlbretent()
 * will likely require changes to these constants.
9920Sstevel@tonic-gate */ 9930Sstevel@tonic-gate 9941050Sgirish #ifndef sun4v 9950Sstevel@tonic-gate #define TSTAT_RETENT_STATHI 1 9960Sstevel@tonic-gate #define TSTAT_RETENT_STATLO 2 997490Ssusans #define TSTAT_RETENT_SHIFT 11 998490Ssusans #define TSTAT_RETENT_COUNT_LD 13 999490Ssusans #define TSTAT_RETENT_COUNT_ST 15 1000490Ssusans #define TSTAT_RETENT_TMPTSHI 16 1001490Ssusans #define TSTAT_RETENT_TMPTSLO 17 1002490Ssusans #define TSTAT_RETENT_TIME_LD 19 1003490Ssusans #define TSTAT_RETENT_TIME_ST 21 10040Sstevel@tonic-gate #else /* sun4v */ 10054732Sdavemq #define TSTAT_RETENT_TDATASHFT 2 10064732Sdavemq #define TSTAT_RETENT_STATHI 4 10074732Sdavemq #define TSTAT_RETENT_STATLO 6 10084732Sdavemq #define TSTAT_RETENT_SHIFT 9 10094732Sdavemq #define TSTAT_RETENT_COUNT_LD 11 10104732Sdavemq #define TSTAT_RETENT_COUNT_ST 13 10114732Sdavemq #define TSTAT_RETENT_TMPTSHI 14 10124732Sdavemq #define TSTAT_RETENT_TMPTSLO 16 10134732Sdavemq #define TSTAT_RETENT_TIME_LD 18 10144732Sdavemq #define TSTAT_RETENT_TIME_ST 20 10150Sstevel@tonic-gate #endif /* sun4v */ 10160Sstevel@tonic-gate 10170Sstevel@tonic-gate static void 10180Sstevel@tonic-gate trapstat_tlbretent(tstat_percpu_t *tcpu, tstat_tlbretent_t *ret, 10190Sstevel@tonic-gate tstat_missdata_t *data) 10200Sstevel@tonic-gate { 10210Sstevel@tonic-gate uint32_t *ent = ret->ttlbrent_instr, shift; 10224732Sdavemq uintptr_t base; 10234732Sdavemq #ifndef sun4v 10244732Sdavemq uintptr_t tmptick = TSTAT_DATA_OFFS(tcpu, tdata_tmptick); 10254732Sdavemq #else 10264732Sdavemq uintptr_t tmptick = TSTAT_CPU0_DATA_OFFS(tcpu, tdata_tmptick); 10274732Sdavemq #endif 10280Sstevel@tonic-gate 10290Sstevel@tonic-gate /* 10300Sstevel@tonic-gate * This is the entry executed upon return from the TLB/TSB miss 10310Sstevel@tonic-gate * handler (i.e. the code interpositioned between the "retry" and 10320Sstevel@tonic-gate * the actual return to the TLB-missing instruction). Detail on its 10330Sstevel@tonic-gate * theory of operation can be found in the "TLB Statistics" section 10340Sstevel@tonic-gate * of the block comment. Note that we expect the TTE just loaded 10350Sstevel@tonic-gate * into the TLB to be in %g5; all other globals are available as 10360Sstevel@tonic-gate * scratch. Finally, note that the page size information in sun4v is 10370Sstevel@tonic-gate * located in the lower bits of the TTE -- requiring us to have a 10380Sstevel@tonic-gate * different return entry on sun4v. 
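 *
 * A worked example of the page-size indexing below (the structure size is
 * hypothetical, chosen only for illustration): the "shift" patched into
 * the sll is log2(sizeof (tstat_pgszdata_t)), as computed by the loop
 * following this entry.  Were that structure 64 bytes, shift would be 6,
 * and a page size index of 3 in %g4 would advance %g1 by 3 << 6 = 0xc0
 * bytes, onto the counters for page size 3.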
10390Sstevel@tonic-gate */ 10400Sstevel@tonic-gate static const uint32_t retent[TSTAT_TLBRET_NINSTR] = { 10410Sstevel@tonic-gate #ifndef sun4v 10420Sstevel@tonic-gate 0x87410000, /* rd %tick, %g3 */ 10430Sstevel@tonic-gate 0x03000000, /* sethi %hi(stat), %g1 */ 10440Sstevel@tonic-gate 0x82106000, /* or %g1, %lo(stat), %g1 */ 10450Sstevel@tonic-gate 0x89297001, /* sllx %g5, 1, %g4 */ 10460Sstevel@tonic-gate 0x8931303e, /* srlx %g4, 62, %g4 */ 10470Sstevel@tonic-gate 0x8531702e, /* srlx %g5, 46, %g2 */ 10480Sstevel@tonic-gate 0x8408a004, /* and %g2, 4, %g2 */ 10490Sstevel@tonic-gate 0x88110002, /* or %g4, %g2, %g4 */ 1050490Ssusans 0x80a12005, /* cmp %g4, 5 */ 1051490Ssusans 0x34400002, /* bg,a,pn %icc, +8 */ 1052490Ssusans 0x88102004, /* mov 4, %g4 */ 10530Sstevel@tonic-gate 0x89292000, /* sll %g4, shift, %g4 */ 10540Sstevel@tonic-gate 0x82004004, /* add %g1, %g4, %g1 */ 10550Sstevel@tonic-gate 0xc4586000, /* ldx [%g1 + tmiss_count], %g2 */ 10560Sstevel@tonic-gate 0x8400a001, /* add %g2, 1, %g2 */ 10570Sstevel@tonic-gate 0xc4706000, /* stx %g2, [%g1 + tmiss_count] */ 10580Sstevel@tonic-gate 0x0d000000, /* sethi %hi(tdata_tmptick), %g6 */ 10590Sstevel@tonic-gate 0xc459a000, /* ldx [%g6 + %lo(tdata_tmptick)], %g2 */ 10600Sstevel@tonic-gate 0x8620c002, /* sub %g3, %g2, %g3 */ 10610Sstevel@tonic-gate 0xc4586000, /* ldx [%g1 + tmiss_time], %g2 */ 10620Sstevel@tonic-gate 0x84008003, /* add %g2, %g3, %g2 */ 10630Sstevel@tonic-gate 0xc4706000, /* stx %g2, [%g1 + tmiss_time] */ 10640Sstevel@tonic-gate 0x83f00000 /* retry */ 10650Sstevel@tonic-gate #else /* sun4v */ 10664732Sdavemq 0x82102008, /* mov SCRATCHPAD_CPUID, %g1 */ 10674732Sdavemq 0xced84400, /* ldxa [%g1]ASI_SCRATCHPAD, %g7 */ 10684732Sdavemq 0x8f29f000, /* sllx %g7, TSTAT_DATA_SHIFT, %g7 */ 10690Sstevel@tonic-gate 0x87410000, /* rd %tick, %g3 */ 10700Sstevel@tonic-gate 0x03000000, /* sethi %hi(stat), %g1 */ 10714732Sdavemq 0x82004007, /* add %g1, %g7, %g1 */ 10720Sstevel@tonic-gate 0x82106000, /* or %g1, %lo(stat), %g1 */ 10730Sstevel@tonic-gate 0x8929703d, /* sllx %g5, 61, %g4 */ 10740Sstevel@tonic-gate 0x8931303d, /* srlx %g4, 61, %g4 */ 10750Sstevel@tonic-gate 0x89292000, /* sll %g4, shift, %g4 */ 10760Sstevel@tonic-gate 0x82004004, /* add %g1, %g4, %g1 */ 10770Sstevel@tonic-gate 0xc4586000, /* ldx [%g1 + tmiss_count], %g2 */ 10780Sstevel@tonic-gate 0x8400a001, /* add %g2, 1, %g2 */ 10790Sstevel@tonic-gate 0xc4706000, /* stx %g2, [%g1 + tmiss_count] */ 10800Sstevel@tonic-gate 0x0d000000, /* sethi %hi(tdata_tmptick), %g6 */ 10814732Sdavemq 0x8c018007, /* add %g6, %g7, %g6 */ 10820Sstevel@tonic-gate 0xc459a000, /* ldx [%g6 + %lo(tdata_tmptick)], %g2 */ 10830Sstevel@tonic-gate 0x8620c002, /* sub %g3, %g2, %g3 */ 10840Sstevel@tonic-gate 0xc4586000, /* ldx [%g1 + tmiss_time], %g2 */ 10850Sstevel@tonic-gate 0x84008003, /* add %g2, %g3, %g2 */ 10860Sstevel@tonic-gate 0xc4706000, /* stx %g2, [%g1 + tmiss_time] */ 10870Sstevel@tonic-gate 0x83f00000 /* retry */ 10880Sstevel@tonic-gate #endif /* sun4v */ 10890Sstevel@tonic-gate }; 10900Sstevel@tonic-gate 10910Sstevel@tonic-gate ASSERT(MUTEX_HELD(&tstat_lock)); 10920Sstevel@tonic-gate /*CONSTCOND*/ 10930Sstevel@tonic-gate ASSERT(offsetof(tstat_missdata_t, tmiss_count) <= LO10(-1)); 10940Sstevel@tonic-gate /*CONSTCOND*/ 10950Sstevel@tonic-gate ASSERT(offsetof(tstat_missdata_t, tmiss_time) <= LO10(-1)); 10960Sstevel@tonic-gate /*CONSTCOND*/ 10970Sstevel@tonic-gate ASSERT(!((sizeof (tstat_pgszdata_t) - 1) & sizeof (tstat_pgszdata_t))); 10980Sstevel@tonic-gate 10990Sstevel@tonic-gate for (shift = 1; 
(1 << shift) != sizeof (tstat_pgszdata_t); shift++) 11000Sstevel@tonic-gate continue; 11010Sstevel@tonic-gate 11024732Sdavemq base = (uintptr_t)tcpu->tcpu_ibase + TSTAT_INSTR_SIZE + 11030Sstevel@tonic-gate ((uintptr_t)data - (uintptr_t)tcpu->tcpu_data); 11040Sstevel@tonic-gate 11050Sstevel@tonic-gate bcopy(retent, ent, sizeof (retent)); 11060Sstevel@tonic-gate 11074732Sdavemq #if defined(sun4v) 11084732Sdavemq ent[TSTAT_RETENT_TDATASHFT] |= LO10((uintptr_t)TSTAT_DATA_SHIFT); 11094732Sdavemq #endif 11100Sstevel@tonic-gate ent[TSTAT_RETENT_STATHI] |= HI22(base); 11110Sstevel@tonic-gate ent[TSTAT_RETENT_STATLO] |= LO10(base); 11120Sstevel@tonic-gate ent[TSTAT_RETENT_SHIFT] |= shift; 11130Sstevel@tonic-gate /* LINTED E_EXPR_NULL_EFFECT */ 11140Sstevel@tonic-gate ent[TSTAT_RETENT_COUNT_LD] |= offsetof(tstat_missdata_t, tmiss_count); 11150Sstevel@tonic-gate /* LINTED E_EXPR_NULL_EFFECT */ 11160Sstevel@tonic-gate ent[TSTAT_RETENT_COUNT_ST] |= offsetof(tstat_missdata_t, tmiss_count); 11170Sstevel@tonic-gate ent[TSTAT_RETENT_TMPTSHI] |= HI22(tmptick); 11180Sstevel@tonic-gate ent[TSTAT_RETENT_TMPTSLO] |= LO10(tmptick); 11190Sstevel@tonic-gate ent[TSTAT_RETENT_TIME_LD] |= offsetof(tstat_missdata_t, tmiss_time); 11200Sstevel@tonic-gate ent[TSTAT_RETENT_TIME_ST] |= offsetof(tstat_missdata_t, tmiss_time); 11210Sstevel@tonic-gate } 11220Sstevel@tonic-gate 11234732Sdavemq #if defined(sun4v) 11244732Sdavemq #undef TSTAT_RETENT_TDATASHFT 11254732Sdavemq #endif 11260Sstevel@tonic-gate #undef TSTAT_RETENT_STATHI 11270Sstevel@tonic-gate #undef TSTAT_RETENT_STATLO 11280Sstevel@tonic-gate #undef TSTAT_RETENT_SHIFT 11290Sstevel@tonic-gate #undef TSTAT_RETENT_COUNT_LD 11300Sstevel@tonic-gate #undef TSTAT_RETENT_COUNT_ST 11310Sstevel@tonic-gate #undef TSTAT_RETENT_TMPTSHI 11320Sstevel@tonic-gate #undef TSTAT_RETENT_TMPTSLO 11330Sstevel@tonic-gate #undef TSTAT_RETENT_TIME_LD 11340Sstevel@tonic-gate #undef TSTAT_RETENT_TIME_ST 11350Sstevel@tonic-gate 11360Sstevel@tonic-gate /* 11370Sstevel@tonic-gate * The TSTAT_TLBENT_* constants define offsets in the TLB entry. They are 11380Sstevel@tonic-gate * used only in trapstat_tlbent() (below) and #undef'd immediately afterwards. 11390Sstevel@tonic-gate * Any change to "tlbent" in trapstat_tlbent() will likely require changes 11400Sstevel@tonic-gate * to these constants. 
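 *
 * Unlike the others, TSTAT_TLBENT_BA is patched with a branch displacement
 * rather than an address.  As a rough illustration (assuming DISP22(from,
 * to) encodes the word distance ((to - from) >> 2) masked to the 22-bit
 * ba,a field): if the interposing entry's branch sat 0x1000 bytes below
 * the underlying handler in the actual trap table, the patched
 * displacement would be 0x1000 >> 2 = 0x400 instruction words.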
11410Sstevel@tonic-gate */ 11420Sstevel@tonic-gate 11430Sstevel@tonic-gate #ifndef sun4v 11440Sstevel@tonic-gate #define TSTAT_TLBENT_STATHI 0 11450Sstevel@tonic-gate #define TSTAT_TLBENT_STATLO_LD 1 11460Sstevel@tonic-gate #define TSTAT_TLBENT_STATLO_ST 3 11470Sstevel@tonic-gate #define TSTAT_TLBENT_MMUASI 15 11480Sstevel@tonic-gate #define TSTAT_TLBENT_TPCHI 18 11490Sstevel@tonic-gate #define TSTAT_TLBENT_TPCLO_USER 19 11500Sstevel@tonic-gate #define TSTAT_TLBENT_TPCLO_KERN 21 11510Sstevel@tonic-gate #define TSTAT_TLBENT_TSHI 25 11520Sstevel@tonic-gate #define TSTAT_TLBENT_TSLO 27 11530Sstevel@tonic-gate #define TSTAT_TLBENT_BA 28 11540Sstevel@tonic-gate #else /* sun4v */ 11554732Sdavemq #define TSTAT_TLBENT_TDATASHFT 2 11564732Sdavemq #define TSTAT_TLBENT_STATHI 3 11574732Sdavemq #define TSTAT_TLBENT_STATLO_LD 5 11584732Sdavemq #define TSTAT_TLBENT_STATLO_ST 7 11594732Sdavemq #define TSTAT_TLBENT_TAGTARGET 23 11604732Sdavemq #define TSTAT_TLBENT_TPCHI 25 11614732Sdavemq #define TSTAT_TLBENT_TPCLO_USER 26 11624732Sdavemq #define TSTAT_TLBENT_TPCLO_KERN 28 11634732Sdavemq #define TSTAT_TLBENT_TSHI 32 11644732Sdavemq #define TSTAT_TLBENT_TSLO 35 11654732Sdavemq #define TSTAT_TLBENT_BA 36 11660Sstevel@tonic-gate #endif /* sun4v */ 11670Sstevel@tonic-gate 11680Sstevel@tonic-gate static void 11690Sstevel@tonic-gate trapstat_tlbent(tstat_percpu_t *tcpu, int entno) 11700Sstevel@tonic-gate { 11710Sstevel@tonic-gate uint32_t *ent; 11720Sstevel@tonic-gate uintptr_t orig, va, baoffs; 11731050Sgirish #ifndef sun4v 11740Sstevel@tonic-gate int itlb = entno == TSTAT_ENT_ITLBMISS; 11754732Sdavemq uint32_t asi = itlb ? ASI(ASI_IMMU) : ASI(ASI_DMMU); 11761050Sgirish #else 11771050Sgirish int itlb = (entno == TSTAT_ENT_IMMUMISS || entno == TSTAT_ENT_ITLBMISS); 11784732Sdavemq uint32_t tagtarget_off = itlb ? MMFSA_I_CTX : MMFSA_D_CTX; 11794732Sdavemq uint32_t *tent; /* MMU trap vector entry */ 11804732Sdavemq uintptr_t tentva; /* MMU trap vector entry va */ 11814732Sdavemq static const uint32_t mmumiss[TSTAT_ENT_NINSTR] = { 11824732Sdavemq 0x30800000, /* ba,a addr */ 11834732Sdavemq NOP, NOP, NOP, NOP, NOP, NOP, NOP 11844732Sdavemq }; 11851050Sgirish #endif 11860Sstevel@tonic-gate int entoffs = entno << TSTAT_ENT_SHIFT; 11870Sstevel@tonic-gate uintptr_t tmptick, stat, tpc, utpc; 11880Sstevel@tonic-gate tstat_pgszdata_t *data = &tcpu->tcpu_data->tdata_pgsz[0]; 11890Sstevel@tonic-gate tstat_tlbdata_t *udata, *kdata; 11900Sstevel@tonic-gate tstat_tlbret_t *ret; 11910Sstevel@tonic-gate 11920Sstevel@tonic-gate /* 11930Sstevel@tonic-gate * When trapstat is run with TLB statistics, this is the entry for 11940Sstevel@tonic-gate * both I- and D-TLB misses; this code performs trap level pushing, 11950Sstevel@tonic-gate * as described in the "TLB Statistics" section of the block comment. 11960Sstevel@tonic-gate * This code is executing at TL 1; %tstate[0] contains the saved 11970Sstevel@tonic-gate * state at the time of the TLB miss. Pushing trap level 1 (and thus 11980Sstevel@tonic-gate * raising TL to 2) requires us to fill in %tstate[1] with our %pstate, 11990Sstevel@tonic-gate * %cwp and %asi. We leave %tt unchanged, and we set %tpc and %tnpc to 12000Sstevel@tonic-gate * the appropriate TLB return entry (based on the context of the miss). 12010Sstevel@tonic-gate * Finally, we sample %tick, and stash it in the tdata_tmptick member 12020Sstevel@tonic-gate * the per-CPU tstat_data structure. 
tdata_tmptick will be used in 12030Sstevel@tonic-gate * the TLB return entry to determine the amount of time spent in the 12040Sstevel@tonic-gate * TLB miss handler. 12050Sstevel@tonic-gate * 1206158Sgirish * Note that on sun4v platforms, we must obtain the context information 1207158Sgirish * from the MMU fault status area. (The base address of this MMU fault 1208158Sgirish * status area is kept in the scratchpad register 0.) 12090Sstevel@tonic-gate */ 12100Sstevel@tonic-gate static const uint32_t tlbent[] = { 12110Sstevel@tonic-gate #ifndef sun4v 12120Sstevel@tonic-gate 0x03000000, /* sethi %hi(stat), %g1 */ 12130Sstevel@tonic-gate 0xc4586000, /* ldx [%g1 + %lo(stat)], %g2 */ 12140Sstevel@tonic-gate 0x8400a001, /* add %g2, 1, %g2 */ 12150Sstevel@tonic-gate 0xc4706000, /* stx %g2, [%g1 + %lo(stat)] */ 12160Sstevel@tonic-gate 0x85524000, /* rdpr %cwp, %g2 */ 12170Sstevel@tonic-gate 0x87518000, /* rdpr %pstate, %g3 */ 12180Sstevel@tonic-gate 0x8728f008, /* sllx %g3, 8, %g3 */ 12190Sstevel@tonic-gate 0x84108003, /* or %g2, %g3, %g2 */ 12200Sstevel@tonic-gate 0x8740c000, /* rd %asi, %g3 */ 12210Sstevel@tonic-gate 0x8728f018, /* sllx %g3, 24, %g3 */ 12220Sstevel@tonic-gate 0x84108003, /* or %g2, %g3, %g2 */ 12230Sstevel@tonic-gate 0x8350c000, /* rdpr %tt, %g1 */ 12240Sstevel@tonic-gate 0x8f902002, /* wrpr %g0, 2, %tl */ 12250Sstevel@tonic-gate 0x85908000, /* wrpr %g2, %g0, %tstate */ 12260Sstevel@tonic-gate 0x87904000, /* wrpr %g1, %g0, %tt */ 12270Sstevel@tonic-gate 0xc2d80000, /* ldxa [%g0]ASI_MMU, %g1 */ 12280Sstevel@tonic-gate 0x83307030, /* srlx %g1, CTXSHIFT, %g1 */ 12290Sstevel@tonic-gate 0x02c04004, /* brz,pn %g1, .+0x10 */ 12300Sstevel@tonic-gate 0x03000000, /* sethi %hi(new_tpc), %g1 */ 12310Sstevel@tonic-gate 0x82106000, /* or %g1, %lo(new_tpc), %g1 */ 12320Sstevel@tonic-gate 0x30800002, /* ba,a .+0x8 */ 12330Sstevel@tonic-gate 0x82106000, /* or %g1, %lo(new_tpc), %g1 */ 12340Sstevel@tonic-gate 0x81904000, /* wrpr %g1, %g0, %tpc */ 12350Sstevel@tonic-gate 0x82006004, /* add %g1, 4, %g1 */ 12360Sstevel@tonic-gate 0x83904000, /* wrpr %g1, %g0, %tnpc */ 12370Sstevel@tonic-gate 0x03000000, /* sethi %hi(tmptick), %g1 */ 12380Sstevel@tonic-gate 0x85410000, /* rd %tick, %g2 */ 12390Sstevel@tonic-gate 0xc4706000, /* stx %g2, [%g1 + %lo(tmptick)] */ 12400Sstevel@tonic-gate 0x30800000, /* ba,a addr */ 12410Sstevel@tonic-gate NOP, NOP, NOP 12420Sstevel@tonic-gate #else /* sun4v */ 12434732Sdavemq 0x82102008, /* mov SCRATCHPAD_CPUID, %g1 */ 12444732Sdavemq 0xc8d84400, /* ldxa [%g1]ASI_SCRATCHPAD, %g4 */ 12454732Sdavemq 0x89293000, /* sllx %g4, TSTAT_DATA_SHIFT, %g4 */ 12460Sstevel@tonic-gate 0x03000000, /* sethi %hi(stat), %g1 */ 12474732Sdavemq 0x82004004, /* add %g1, %g4, %g1 */ 12480Sstevel@tonic-gate 0xc4586000, /* ldx [%g1 + %lo(stat)], %g2 */ 12490Sstevel@tonic-gate 0x8400a001, /* add %g2, 1, %g2 */ 12500Sstevel@tonic-gate 0xc4706000, /* stx %g2, [%g1 + %lo(stat)] */ 12510Sstevel@tonic-gate 0x85524000, /* rdpr %cwp, %g2 */ 12520Sstevel@tonic-gate 0x87518000, /* rdpr %pstate, %g3 */ 12530Sstevel@tonic-gate 0x8728f008, /* sllx %g3, 8, %g3 */ 12540Sstevel@tonic-gate 0x84108003, /* or %g2, %g3, %g2 */ 12550Sstevel@tonic-gate 0x8740c000, /* rd %asi, %g3 */ 1256158Sgirish 0x8728f018, /* sllx %g3, 24, %g3 */ 1257158Sgirish 0x83540000, /* rdpr %gl, %g1 */ 1258158Sgirish 0x83287028, /* sllx %g1, 40, %g1 */ 12590Sstevel@tonic-gate 0x86104003, /* or %g1, %g3, %g3 */ 12600Sstevel@tonic-gate 0x84108003, /* or %g2, %g3, %g2 */ 12610Sstevel@tonic-gate 0x8350c000, /* rdpr %tt, %g1 */ 
12620Sstevel@tonic-gate 0x8f902002, /* wrpr %g0, 2, %tl */ 12630Sstevel@tonic-gate 0x85908000, /* wrpr %g2, %g0, %tstate */ 12640Sstevel@tonic-gate 0x87904000, /* wrpr %g1, %g0, %tt */ 12650Sstevel@tonic-gate 0xc2d80400, /* ldxa [%g0]ASI_SCRATCHPAD, %g1 */ 12660Sstevel@tonic-gate 0xc2586000, /* ldx [%g1 + MMFSA_?_CTX], %g1 */ 12670Sstevel@tonic-gate 0x02c04004, /* brz,pn %g1, .+0x10 */ 12680Sstevel@tonic-gate 0x03000000, /* sethi %hi(new_tpc), %g1 */ 12690Sstevel@tonic-gate 0x82106000, /* or %g1, %lo(new_tpc), %g1 */ 12700Sstevel@tonic-gate 0x30800002, /* ba,a .+0x8 */ 12710Sstevel@tonic-gate 0x82106000, /* or %g1, %lo(new_tpc), %g1 */ 12720Sstevel@tonic-gate 0x81904000, /* wrpr %g1, %g0, %tpc */ 12730Sstevel@tonic-gate 0x82006004, /* add %g1, 4, %g1 */ 12740Sstevel@tonic-gate 0x83904000, /* wrpr %g1, %g0, %tnpc */ 12750Sstevel@tonic-gate 0x03000000, /* sethi %hi(tmptick), %g1 */ 12764732Sdavemq 0x82004004, /* add %g1, %g4, %g1 */ 12770Sstevel@tonic-gate 0x85410000, /* rd %tick, %g2 */ 12780Sstevel@tonic-gate 0xc4706000, /* stx %g2, [%g1 + %lo(tmptick)] */ 12790Sstevel@tonic-gate 0x30800000 /* ba,a addr */ 12800Sstevel@tonic-gate #endif /* sun4v */ 12810Sstevel@tonic-gate }; 12820Sstevel@tonic-gate 12830Sstevel@tonic-gate ASSERT(MUTEX_HELD(&tstat_lock)); 12841050Sgirish #ifndef sun4v 12850Sstevel@tonic-gate ASSERT(entno == TSTAT_ENT_ITLBMISS || entno == TSTAT_ENT_DTLBMISS); 12860Sstevel@tonic-gate 12870Sstevel@tonic-gate stat = TSTAT_DATA_OFFS(tcpu, tdata_traps) + entoffs; 12880Sstevel@tonic-gate tmptick = TSTAT_DATA_OFFS(tcpu, tdata_tmptick); 12894732Sdavemq #else /* sun4v */ 12904732Sdavemq ASSERT(entno == TSTAT_ENT_ITLBMISS || entno == TSTAT_ENT_DTLBMISS || 12914732Sdavemq entno == TSTAT_ENT_IMMUMISS || entno == TSTAT_ENT_DMMUMISS); 12924732Sdavemq 12934732Sdavemq stat = TSTAT_CPU0_DATA_OFFS(tcpu, tdata_traps) + entoffs; 12944732Sdavemq tmptick = TSTAT_CPU0_DATA_OFFS(tcpu, tdata_tmptick); 12954732Sdavemq #endif /* sun4v */ 12960Sstevel@tonic-gate 12970Sstevel@tonic-gate if (itlb) { 12980Sstevel@tonic-gate ret = &tcpu->tcpu_instr->tinst_itlbret; 12990Sstevel@tonic-gate udata = &data->tpgsz_user.tmode_itlb; 13000Sstevel@tonic-gate kdata = &data->tpgsz_kernel.tmode_itlb; 13010Sstevel@tonic-gate tpc = TSTAT_INSTR_OFFS(tcpu, tinst_itlbret.ttlbr_ktlb); 13020Sstevel@tonic-gate } else { 13030Sstevel@tonic-gate ret = &tcpu->tcpu_instr->tinst_dtlbret; 13040Sstevel@tonic-gate udata = &data->tpgsz_user.tmode_dtlb; 13050Sstevel@tonic-gate kdata = &data->tpgsz_kernel.tmode_dtlb; 13060Sstevel@tonic-gate tpc = TSTAT_INSTR_OFFS(tcpu, tinst_dtlbret.ttlbr_ktlb); 13070Sstevel@tonic-gate } 13080Sstevel@tonic-gate 13090Sstevel@tonic-gate utpc = tpc + offsetof(tstat_tlbret_t, ttlbr_utlb) - 13100Sstevel@tonic-gate offsetof(tstat_tlbret_t, ttlbr_ktlb); 13110Sstevel@tonic-gate 13120Sstevel@tonic-gate ASSERT(HI22(tpc) == HI22(utpc)); 13130Sstevel@tonic-gate 13140Sstevel@tonic-gate ent = (uint32_t *)((uintptr_t)tcpu->tcpu_instr + entoffs); 13150Sstevel@tonic-gate orig = KERNELBASE + entoffs; 13160Sstevel@tonic-gate va = (uintptr_t)tcpu->tcpu_ibase + entoffs; 13170Sstevel@tonic-gate baoffs = TSTAT_TLBENT_BA * sizeof (uint32_t); 13180Sstevel@tonic-gate 13191050Sgirish #ifdef sun4v 13204732Sdavemq /* 13214732Sdavemq * Because of lack of space, interposing tlbent trap handler 13224732Sdavemq * for TLB and MMU miss traps cannot be placed in-line. 
Instead, 13234732Sdavemq * we copy it to the space set aside for shared trap handlers 13244732Sdavemq * continuation in the interposing trap table and invoke it by 13254732Sdavemq * placing a branch in the trap table itself. 13264732Sdavemq */ 13274732Sdavemq tent = ent; /* trap vector entry */ 13284732Sdavemq tentva = va; /* trap vector entry va */ 13291050Sgirish 13304732Sdavemq if (itlb) { 13314732Sdavemq ent = (uint32_t *)((uintptr_t) 13324732Sdavemq &tcpu->tcpu_instr->tinst_immumiss); 13334732Sdavemq va = TSTAT_INSTR_OFFS(tcpu, tinst_immumiss); 13344732Sdavemq } else { 13354732Sdavemq ent = (uint32_t *)((uintptr_t) 13364732Sdavemq &tcpu->tcpu_instr->tinst_dmmumiss); 13374732Sdavemq va = TSTAT_INSTR_OFFS(tcpu, tinst_dmmumiss); 13381050Sgirish } 13394732Sdavemq bcopy(mmumiss, tent, sizeof (mmumiss)); 13404732Sdavemq tent[0] |= DISP22(tentva, va); 13411050Sgirish #endif /* sun4v */ 13421050Sgirish 13430Sstevel@tonic-gate bcopy(tlbent, ent, sizeof (tlbent)); 13440Sstevel@tonic-gate 13454732Sdavemq #if defined(sun4v) 13464732Sdavemq ent[TSTAT_TLBENT_TDATASHFT] |= LO10((uintptr_t)TSTAT_DATA_SHIFT); 13474732Sdavemq #endif 13480Sstevel@tonic-gate ent[TSTAT_TLBENT_STATHI] |= HI22(stat); 13490Sstevel@tonic-gate ent[TSTAT_TLBENT_STATLO_LD] |= LO10(stat); 13500Sstevel@tonic-gate ent[TSTAT_TLBENT_STATLO_ST] |= LO10(stat); 13510Sstevel@tonic-gate #ifndef sun4v 13520Sstevel@tonic-gate ent[TSTAT_TLBENT_MMUASI] |= asi; 13530Sstevel@tonic-gate #else 13540Sstevel@tonic-gate ent[TSTAT_TLBENT_TAGTARGET] |= tagtarget_off; 13550Sstevel@tonic-gate #endif 13560Sstevel@tonic-gate ent[TSTAT_TLBENT_TPCHI] |= HI22(tpc); 13570Sstevel@tonic-gate ent[TSTAT_TLBENT_TPCLO_USER] |= LO10(utpc); 13580Sstevel@tonic-gate ent[TSTAT_TLBENT_TPCLO_KERN] |= LO10(tpc); 13590Sstevel@tonic-gate ent[TSTAT_TLBENT_TSHI] |= HI22(tmptick); 13600Sstevel@tonic-gate ent[TSTAT_TLBENT_TSLO] |= LO10(tmptick); 13610Sstevel@tonic-gate ent[TSTAT_TLBENT_BA] |= DISP22(va + baoffs, orig); 13620Sstevel@tonic-gate 13630Sstevel@tonic-gate /* 13640Sstevel@tonic-gate * And now set up the TLB return entries. 13650Sstevel@tonic-gate */ 13660Sstevel@tonic-gate trapstat_tlbretent(tcpu, &ret->ttlbr_ktlb, &kdata->ttlb_tlb); 13670Sstevel@tonic-gate trapstat_tlbretent(tcpu, &ret->ttlbr_ktsb, &kdata->ttlb_tsb); 13680Sstevel@tonic-gate trapstat_tlbretent(tcpu, &ret->ttlbr_utlb, &udata->ttlb_tlb); 13690Sstevel@tonic-gate trapstat_tlbretent(tcpu, &ret->ttlbr_utsb, &udata->ttlb_tsb); 13700Sstevel@tonic-gate } 13710Sstevel@tonic-gate 13724732Sdavemq #if defined(sun4v) 13734732Sdavemq #undef TSTAT_TLBENT_TDATASHFT 13744732Sdavemq #endif 13750Sstevel@tonic-gate #undef TSTAT_TLBENT_STATHI 13760Sstevel@tonic-gate #undef TSTAT_TLBENT_STATLO_LD 13770Sstevel@tonic-gate #undef TSTAT_TLBENT_STATLO_ST 13780Sstevel@tonic-gate #ifndef sun4v 13790Sstevel@tonic-gate #undef TSTAT_TLBENT_MMUASI 13800Sstevel@tonic-gate #else 13810Sstevel@tonic-gate #undef TSTAT_TLBENT_TAGTARGET 13820Sstevel@tonic-gate #endif 13830Sstevel@tonic-gate #undef TSTAT_TLBENT_TPCHI 13840Sstevel@tonic-gate #undef TSTAT_TLBENT_TPCLO_USER 13850Sstevel@tonic-gate #undef TSTAT_TLBENT_TPCLO_KERN 13860Sstevel@tonic-gate #undef TSTAT_TLBENT_TSHI 13870Sstevel@tonic-gate #undef TSTAT_TLBENT_TSLO 13880Sstevel@tonic-gate #undef TSTAT_TLBENT_BA 13890Sstevel@tonic-gate 13900Sstevel@tonic-gate /* 13910Sstevel@tonic-gate * The TSTAT_ENABLED_* constants define offsets in the enabled entry; the 13920Sstevel@tonic-gate * TSTAT_DISABLED_BA constant defines an offset in the disabled entry. 
Both 13930Sstevel@tonic-gate * sets of constants are used only in trapstat_make_traptab() (below) and 13940Sstevel@tonic-gate * #undef'd immediately afterwards. Any change to "enabled" or "disabled" 13950Sstevel@tonic-gate * in trapstat_make_traptab() will likely require changes to these constants. 13960Sstevel@tonic-gate */ 13974732Sdavemq #ifndef sun4v 13980Sstevel@tonic-gate #define TSTAT_ENABLED_STATHI 0 13990Sstevel@tonic-gate #define TSTAT_ENABLED_STATLO_LD 1 14000Sstevel@tonic-gate #define TSTAT_ENABLED_STATLO_ST 3 14010Sstevel@tonic-gate #define TSTAT_ENABLED_BA 4 14020Sstevel@tonic-gate #define TSTAT_DISABLED_BA 0 14030Sstevel@tonic-gate 14040Sstevel@tonic-gate static void 14050Sstevel@tonic-gate trapstat_make_traptab(tstat_percpu_t *tcpu) 14060Sstevel@tonic-gate { 14070Sstevel@tonic-gate uint32_t *ent; 14080Sstevel@tonic-gate uint64_t *stat; 14090Sstevel@tonic-gate uintptr_t orig, va, en_baoffs, dis_baoffs; 14100Sstevel@tonic-gate int nent; 14110Sstevel@tonic-gate 14120Sstevel@tonic-gate /* 14130Sstevel@tonic-gate * This is the entry in the interposing trap table for enabled trap 14140Sstevel@tonic-gate * table entries. It loads a counter, increments it and stores it 14150Sstevel@tonic-gate * back before branching to the actual trap table entry. 14160Sstevel@tonic-gate */ 14170Sstevel@tonic-gate static const uint32_t enabled[TSTAT_ENT_NINSTR] = { 14180Sstevel@tonic-gate 0x03000000, /* sethi %hi(stat), %g1 */ 14190Sstevel@tonic-gate 0xc4586000, /* ldx [%g1 + %lo(stat)], %g2 */ 14200Sstevel@tonic-gate 0x8400a001, /* add %g2, 1, %g2 */ 14210Sstevel@tonic-gate 0xc4706000, /* stx %g2, [%g1 + %lo(stat)] */ 14220Sstevel@tonic-gate 0x30800000, /* ba,a addr */ 14230Sstevel@tonic-gate NOP, NOP, NOP 14240Sstevel@tonic-gate }; 14250Sstevel@tonic-gate 14260Sstevel@tonic-gate /* 14270Sstevel@tonic-gate * This is the entry in the interposing trap table for disabled trap 14280Sstevel@tonic-gate * table entries. It simply branches to the actual, underlying trap 14290Sstevel@tonic-gate * table entry. As explained in the "Implementation Details" section 14300Sstevel@tonic-gate * of the block comment, all TL>0 traps _must_ use the disabled entry; 14310Sstevel@tonic-gate * additional entries may be explicitly disabled through the use 14320Sstevel@tonic-gate * of TSTATIOC_ENTRY/TSTATIOC_NOENTRY. 
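 *
 * For example (the trap type is chosen purely for illustration): the
 * disabled entry for the sun4u fast data MMU miss trap, tt 0x68, is a
 * single ba,a whose displacement is patched to land on
 * KERNELBASE + (0x68 << 5) = KERNELBASE + 0xd00 -- the corresponding
 * entry in the underlying trap table -- so the interposition adds only
 * one taken branch to that trap.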
14330Sstevel@tonic-gate */ 14340Sstevel@tonic-gate static const uint32_t disabled[TSTAT_ENT_NINSTR] = { 14350Sstevel@tonic-gate 0x30800000, /* ba,a addr */ 14360Sstevel@tonic-gate NOP, NOP, NOP, NOP, NOP, NOP, NOP, 14370Sstevel@tonic-gate }; 14380Sstevel@tonic-gate 14390Sstevel@tonic-gate ASSERT(MUTEX_HELD(&tstat_lock)); 14400Sstevel@tonic-gate 14410Sstevel@tonic-gate ent = tcpu->tcpu_instr->tinst_traptab; 14420Sstevel@tonic-gate stat = (uint64_t *)TSTAT_DATA_OFFS(tcpu, tdata_traps); 14430Sstevel@tonic-gate orig = KERNELBASE; 14440Sstevel@tonic-gate va = (uintptr_t)tcpu->tcpu_ibase; 14450Sstevel@tonic-gate en_baoffs = TSTAT_ENABLED_BA * sizeof (uint32_t); 14460Sstevel@tonic-gate dis_baoffs = TSTAT_DISABLED_BA * sizeof (uint32_t); 14470Sstevel@tonic-gate 14480Sstevel@tonic-gate for (nent = 0; nent < TSTAT_TOTAL_NENT; nent++) { 14490Sstevel@tonic-gate if (tstat_enabled[nent]) { 14500Sstevel@tonic-gate bcopy(enabled, ent, sizeof (enabled)); 1451567Sdmick ent[TSTAT_ENABLED_STATHI] |= HI22((uintptr_t)stat); 1452567Sdmick ent[TSTAT_ENABLED_STATLO_LD] |= LO10((uintptr_t)stat); 1453567Sdmick ent[TSTAT_ENABLED_STATLO_ST] |= LO10((uintptr_t)stat); 14540Sstevel@tonic-gate ent[TSTAT_ENABLED_BA] |= DISP22(va + en_baoffs, orig); 14550Sstevel@tonic-gate } else { 14560Sstevel@tonic-gate bcopy(disabled, ent, sizeof (disabled)); 14570Sstevel@tonic-gate ent[TSTAT_DISABLED_BA] |= DISP22(va + dis_baoffs, orig); 14580Sstevel@tonic-gate } 14590Sstevel@tonic-gate 14600Sstevel@tonic-gate stat++; 14610Sstevel@tonic-gate orig += sizeof (enabled); 14620Sstevel@tonic-gate ent += sizeof (enabled) / sizeof (*ent); 14630Sstevel@tonic-gate va += sizeof (enabled); 14640Sstevel@tonic-gate } 14650Sstevel@tonic-gate } 14660Sstevel@tonic-gate 14670Sstevel@tonic-gate #undef TSTAT_ENABLED_STATHI 14680Sstevel@tonic-gate #undef TSTAT_ENABLED_STATLO_LD 14690Sstevel@tonic-gate #undef TSTAT_ENABLED_STATLO_ST 14700Sstevel@tonic-gate #undef TSTAT_ENABLED_BA 14710Sstevel@tonic-gate #undef TSTAT_DISABLED_BA 14720Sstevel@tonic-gate 14734732Sdavemq #else /* sun4v */ 14744732Sdavemq 14754732Sdavemq #define TSTAT_ENABLED_STATHI 0 14764732Sdavemq #define TSTAT_ENABLED_STATLO 1 14774732Sdavemq #define TSTAT_ENABLED_ADDRHI 2 14784732Sdavemq #define TSTAT_ENABLED_ADDRLO 3 14794732Sdavemq #define TSTAT_ENABLED_CONTBA 6 14804732Sdavemq #define TSTAT_ENABLED_TDATASHFT 7 14814732Sdavemq #define TSTAT_DISABLED_BA 0 14824732Sdavemq 14834732Sdavemq static void 14844732Sdavemq trapstat_make_traptab(tstat_percpu_t *tcpu) 14854732Sdavemq { 14864732Sdavemq uint32_t *ent; 14874732Sdavemq uint64_t *stat; 14884732Sdavemq uintptr_t orig, va, en_baoffs, dis_baoffs; 14894732Sdavemq uintptr_t tstat_cont_va; 14904732Sdavemq int nent; 14914732Sdavemq 14924732Sdavemq /* 14934732Sdavemq * This is the entry in the interposing trap table for enabled trap 14944732Sdavemq * table entries. It loads a counter, increments it and stores it 14954732Sdavemq * back before branching to the actual trap table entry. 14964732Sdavemq * 14974732Sdavemq * All CPUs share the same interposing trap entry to count the 14984732Sdavemq * number of traps. Note that the trap counter is kept in per CPU 14994732Sdavemq * trap statistics area. Its address is obtained dynamically by 15004732Sdavemq * adding the offset of that CPU's trap statistics area from CPU 0 15014732Sdavemq * (i.e. cpu_id * TSTAT_DATA_SIZE) to the address of the CPU 0 15024732Sdavemq * trap counter already coded in the interposing trap entry itself. 
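 *
 * As a hedged example (the shift is illustrative, not the actual value):
 * if TSTAT_DATA_SHIFT were 13 -- a TSTAT_DATA_SIZE of 8K per CPU -- the
 * entry executing on CPU 3 would derive 3 << 13 = 0x6000 from its
 * scratchpad CPU ID and add that to the CPU 0 counter address, reaching
 * its own counters without any per-CPU patching of the shared entry.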
15034732Sdavemq *
15044732Sdavemq * Since this interposing code sequence to count traps takes more
15054732Sdavemq * than 8 instructions, it's split in two parts as follows:
15064732Sdavemq *
15074732Sdavemq * tstat_trapcnt:
15084732Sdavemq * sethi %hi(stat), %g1
15094732Sdavemq * or %g1, %lo(stat), %g1 ! %g1 = CPU0 trap counter addr
15104732Sdavemq * sethi %hi(addr), %g2
15114732Sdavemq * or %g2, %lo(addr), %g2 ! %g2 = real trap handler addr
15124732Sdavemq * mov ASI_SCRATCHPAD_CPUID, %g3
15134732Sdavemq * ldxa [%g3]ASI_SCRATCHPAD, %g3 ! %g3 = CPU ID
15144732Sdavemq * ba tstat_trapcnt_cont ! branch to tstat_trapcnt_cont
15154732Sdavemq * sllx %g3, TSTAT_DATA_SHIFT, %g3 ! %g3 = CPU trapstat data offset
15164732Sdavemq *
15174732Sdavemq * tstat_trapcnt_cont:
15184732Sdavemq * ldx [%g1 + %g3], %g4 ! get counter value
15194732Sdavemq * add %g4, 1, %g4 ! increment value
15204732Sdavemq * jmp %g2 ! jump to original trap handler
15214732Sdavemq * stx %g4, [%g1 + %g3] ! store counter value
15224732Sdavemq *
15234732Sdavemq * The first part, i.e. tstat_trapcnt, is per trap and is kept in-line in
15244732Sdavemq * the interposing trap table. However, the tstat_trapcnt_cont code
15254732Sdavemq * sequence is shared by all traps and is kept right after
15264732Sdavemq * the interposing trap table.
15274732Sdavemq */
15284732Sdavemq static const uint32_t enabled[TSTAT_ENT_NINSTR] = {
15294732Sdavemq 0x03000000, /* sethi %hi(stat), %g1 */
15304732Sdavemq 0x82106000, /* or %g1, %lo(stat), %g1 */
15314732Sdavemq 0x05000000, /* sethi %hi(addr), %g2 */
15324732Sdavemq 0x8410a000, /* or %g2, %lo(addr), %g2 */
15334732Sdavemq 0x86102008, /* mov ASI_SCRATCHPAD_CPUID, %g3 */
15344732Sdavemq 0xc6d8c400, /* ldxa [%g3]ASI_SCRATCHPAD, %g3 */
15354732Sdavemq 0x10800000, /* ba enabled_cont */
15364732Sdavemq 0x8728f000 /* sllx %g3, TSTAT_DATA_SHIFT, %g3 */
15374732Sdavemq };
15384732Sdavemq
15394732Sdavemq static const uint32_t enabled_cont[TSTAT_ENT_NINSTR] = {
15404732Sdavemq 0xc8584003, /* ldx [%g1 + %g3], %g4 */
15414732Sdavemq 0x88012001, /* add %g4, 1, %g4 */
15424732Sdavemq 0x81c08000, /* jmp %g2 */
15434732Sdavemq 0xc8704003, /* stx %g4, [%g1 + %g3] */
15444732Sdavemq NOP, NOP, NOP, NOP
15454732Sdavemq };
15464732Sdavemq
15474732Sdavemq /*
15484732Sdavemq * This is the entry in the interposing trap table for disabled trap
15494732Sdavemq * table entries. It simply branches to the actual, underlying trap
15504732Sdavemq * table entry. As explained in the "Implementation Details" section
15514732Sdavemq * of the block comment, all TL>0 traps _must_ use the disabled entry;
15524732Sdavemq * additional entries may be explicitly disabled through the use
15534732Sdavemq * of TSTATIOC_ENTRY/TSTATIOC_NOENTRY.
15544732Sdavemq */ 15554732Sdavemq static const uint32_t disabled[TSTAT_ENT_NINSTR] = { 15564732Sdavemq 0x30800000, /* ba,a addr */ 15574732Sdavemq NOP, NOP, NOP, NOP, NOP, NOP, NOP, 15584732Sdavemq }; 15594732Sdavemq 15604732Sdavemq ASSERT(MUTEX_HELD(&tstat_lock)); 15614732Sdavemq ent = tcpu->tcpu_instr->tinst_traptab; 15624732Sdavemq stat = (uint64_t *)TSTAT_CPU0_DATA_OFFS(tcpu, tdata_traps); 15634732Sdavemq orig = KERNELBASE; 15644732Sdavemq va = (uintptr_t)tcpu->tcpu_ibase; 15654732Sdavemq en_baoffs = TSTAT_ENABLED_CONTBA * sizeof (uint32_t); 15664732Sdavemq dis_baoffs = TSTAT_DISABLED_BA * sizeof (uint32_t); 15674732Sdavemq tstat_cont_va = TSTAT_INSTR_OFFS(tcpu, tinst_trapcnt); 15684732Sdavemq 15694732Sdavemq for (nent = 0; nent < TSTAT_TOTAL_NENT; nent++) { 15704732Sdavemq if (tstat_enabled[nent]) { 15714732Sdavemq bcopy(enabled, ent, sizeof (enabled)); 15724732Sdavemq ent[TSTAT_ENABLED_STATHI] |= HI22((uintptr_t)stat); 15734732Sdavemq ent[TSTAT_ENABLED_STATLO] |= LO10((uintptr_t)stat); 15744732Sdavemq ent[TSTAT_ENABLED_ADDRHI] |= HI22((uintptr_t)orig); 15754732Sdavemq ent[TSTAT_ENABLED_ADDRLO] |= LO10((uintptr_t)orig); 15764732Sdavemq ent[TSTAT_ENABLED_CONTBA] |= 15774732Sdavemq DISP22(va + en_baoffs, tstat_cont_va); 15784732Sdavemq ent[TSTAT_ENABLED_TDATASHFT] |= 15794732Sdavemq LO10((uintptr_t)TSTAT_DATA_SHIFT); 15804732Sdavemq } else { 15814732Sdavemq bcopy(disabled, ent, sizeof (disabled)); 15824732Sdavemq ent[TSTAT_DISABLED_BA] |= DISP22(va + dis_baoffs, orig); 15834732Sdavemq } 15844732Sdavemq 15854732Sdavemq stat++; 15864732Sdavemq orig += sizeof (enabled); 15874732Sdavemq ent += sizeof (enabled) / sizeof (*ent); 15884732Sdavemq va += sizeof (enabled); 15894732Sdavemq } 15904732Sdavemq bcopy(enabled_cont, (uint32_t *)tcpu->tcpu_instr->tinst_trapcnt, 15914732Sdavemq sizeof (enabled_cont)); 15924732Sdavemq } 15934732Sdavemq 15944732Sdavemq #undef TSTAT_ENABLED_TDATASHFT 15954732Sdavemq #undef TSTAT_ENABLED_STATHI 15964732Sdavemq #undef TSTAT_ENABLED_STATLO 15974732Sdavemq #undef TSTAT_ENABLED_ADDRHI 15984732Sdavemq #undef TSTAT_ENABLED_ADDRLO 15994732Sdavemq #undef TSTAT_ENABLED_CONTBA 16004732Sdavemq #undef TSTAT_DISABLED_BA 16014732Sdavemq 16024732Sdavemq #endif /* sun4v */ 16034732Sdavemq 16041772Sjl139090 #ifndef sun4v 16051772Sjl139090 /* 16061772Sjl139090 * See Section A.6 in SPARC v9 Manual. 
16071772Sjl139090 * max branch = 4*((2^21)-1) = 8388604 16081772Sjl139090 */ 16091772Sjl139090 #define MAX_BICC_BRANCH_DISPLACEMENT (4 * ((1 << 21) - 1)) 16101772Sjl139090 #endif 16111772Sjl139090 16120Sstevel@tonic-gate static void 16130Sstevel@tonic-gate trapstat_setup(processorid_t cpu) 16140Sstevel@tonic-gate { 16150Sstevel@tonic-gate tstat_percpu_t *tcpu = &tstat_percpu[cpu]; 16160Sstevel@tonic-gate #ifndef sun4v 16170Sstevel@tonic-gate int i; 16180Sstevel@tonic-gate caddr_t va; 16190Sstevel@tonic-gate pfn_t *pfn; 16201772Sjl139090 cpu_t *cp; 16211772Sjl139090 uint_t strand_idx; 16221772Sjl139090 size_t tstat_offset; 16230Sstevel@tonic-gate #endif 16240Sstevel@tonic-gate 16250Sstevel@tonic-gate ASSERT(tcpu->tcpu_pfn == NULL); 16260Sstevel@tonic-gate ASSERT(tcpu->tcpu_instr == NULL); 16270Sstevel@tonic-gate ASSERT(tcpu->tcpu_data == NULL); 16280Sstevel@tonic-gate ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED); 16290Sstevel@tonic-gate ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED)); 16300Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 16310Sstevel@tonic-gate ASSERT(MUTEX_HELD(&tstat_lock)); 16320Sstevel@tonic-gate 16334732Sdavemq #ifndef sun4v 16340Sstevel@tonic-gate /* 16350Sstevel@tonic-gate * The lower fifteen bits of the %tba are always read as zero; we must 16360Sstevel@tonic-gate * align our instruction base address appropriately. 16370Sstevel@tonic-gate */ 16381772Sjl139090 tstat_offset = tstat_total_size; 16391772Sjl139090 16401772Sjl139090 cp = cpu_get(cpu); 16411772Sjl139090 ASSERT(cp != NULL); 16423434Sesaxe if ((strand_idx = cpu ^ pg_plat_hw_instance_id(cp, PGHW_IPIPE)) != 0) { 16431772Sjl139090 /* 16441772Sjl139090 * On sun4u platforms with multiple CPUs sharing the MMU 16451772Sjl139090 * (Olympus-C has 2 strands per core), each CPU uses a 16461772Sjl139090 * disjoint trap table. The indexing is based on the 16471772Sjl139090 * strand id, which is obtained by XOR'ing the cpuid with 16481772Sjl139090 * the coreid. 16491772Sjl139090 */ 16501772Sjl139090 tstat_offset += tstat_total_size * strand_idx; 16511772Sjl139090 16521772Sjl139090 /* 16531772Sjl139090 * Offset must be less than the maximum PC-relative branch 16541772Sjl139090 * displacement for Bicc variants. See the Implementation 16551772Sjl139090 * Details comment. 
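 *
 * A sanity check with made-up numbers: with the two strands of an
 * Olympus-C core and a tstat_total_size of, say, 1 MB, strand 1's
 * interposing table would sit 2 MB below KERNELBASE, so the branch back
 * into the real trap table spans 2 MB -- comfortably within the roughly
 * 8 MB (4 * ((2^21) - 1) bytes) reach of a Bicc branch.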
16561772Sjl139090 */ 16571772Sjl139090 ASSERT(tstat_offset <= MAX_BICC_BRANCH_DISPLACEMENT); 16581772Sjl139090 } 16591772Sjl139090 16601772Sjl139090 tcpu->tcpu_ibase = (caddr_t)((KERNELBASE - tstat_offset) 16614732Sdavemq & TSTAT_TBA_MASK); 16620Sstevel@tonic-gate tcpu->tcpu_dbase = tcpu->tcpu_ibase + TSTAT_INSTR_SIZE; 16630Sstevel@tonic-gate tcpu->tcpu_vabase = tcpu->tcpu_ibase; 16640Sstevel@tonic-gate 16650Sstevel@tonic-gate tcpu->tcpu_pfn = vmem_alloc(tstat_arena, tstat_total_pages, VM_SLEEP); 16660Sstevel@tonic-gate bzero(tcpu->tcpu_pfn, tstat_total_pages); 16670Sstevel@tonic-gate pfn = tcpu->tcpu_pfn; 16680Sstevel@tonic-gate 16690Sstevel@tonic-gate tcpu->tcpu_instr = vmem_alloc(tstat_arena, TSTAT_INSTR_SIZE, VM_SLEEP); 16700Sstevel@tonic-gate 16710Sstevel@tonic-gate va = (caddr_t)tcpu->tcpu_instr; 16720Sstevel@tonic-gate for (i = 0; i < TSTAT_INSTR_PAGES; i++, va += MMU_PAGESIZE) 16730Sstevel@tonic-gate *pfn++ = hat_getpfnum(kas.a_hat, va); 16740Sstevel@tonic-gate 16750Sstevel@tonic-gate /* 16760Sstevel@tonic-gate * We must be sure that the pages that we will use to examine the data 16770Sstevel@tonic-gate * have the same virtual color as the pages to which the data is being 16780Sstevel@tonic-gate * recorded, hence the alignment and phase constraints on the 16790Sstevel@tonic-gate * allocation. 16800Sstevel@tonic-gate */ 16810Sstevel@tonic-gate tcpu->tcpu_data = vmem_xalloc(tstat_arena, tstat_data_size, 16820Sstevel@tonic-gate shm_alignment, (uintptr_t)tcpu->tcpu_dbase & (shm_alignment - 1), 16830Sstevel@tonic-gate 0, 0, NULL, VM_SLEEP); 16840Sstevel@tonic-gate bzero(tcpu->tcpu_data, tstat_data_size); 16850Sstevel@tonic-gate tcpu->tcpu_data->tdata_cpuid = cpu; 16860Sstevel@tonic-gate 16870Sstevel@tonic-gate va = (caddr_t)tcpu->tcpu_data; 16880Sstevel@tonic-gate for (i = 0; i < tstat_data_pages; i++, va += MMU_PAGESIZE) 16890Sstevel@tonic-gate *pfn++ = hat_getpfnum(kas.a_hat, va); 16900Sstevel@tonic-gate 16910Sstevel@tonic-gate /* 16920Sstevel@tonic-gate * Now that we have all of the instruction and data pages allocated, 16930Sstevel@tonic-gate * make the trap table from scratch. 16940Sstevel@tonic-gate */ 16950Sstevel@tonic-gate trapstat_make_traptab(tcpu); 16960Sstevel@tonic-gate 16970Sstevel@tonic-gate if (tstat_options & TSTAT_OPT_TLBDATA) { 16980Sstevel@tonic-gate /* 16990Sstevel@tonic-gate * TLB Statistics have been specified; set up the I- and D-TLB 17000Sstevel@tonic-gate * entries and corresponding TLB return entries. 17010Sstevel@tonic-gate */ 17020Sstevel@tonic-gate trapstat_tlbent(tcpu, TSTAT_ENT_ITLBMISS); 17030Sstevel@tonic-gate trapstat_tlbent(tcpu, TSTAT_ENT_DTLBMISS); 17044732Sdavemq } 17054732Sdavemq 17064732Sdavemq #else /* sun4v */ 17074732Sdavemq 17084732Sdavemq /* 17094732Sdavemq * The lower fifteen bits of the %tba are always read as zero; hence 17104732Sdavemq * it must be aligned at least on 512K boundary. 
17114732Sdavemq */ 17124732Sdavemq tcpu->tcpu_vabase = (caddr_t)(KERNELBASE - MMU_PAGESIZE4M); 17134732Sdavemq tcpu->tcpu_ibase = tcpu->tcpu_vabase; 17144732Sdavemq tcpu->tcpu_dbase = tcpu->tcpu_ibase + TSTAT_INSTR_SIZE + 17154732Sdavemq cpu * TSTAT_DATA_SIZE; 17164732Sdavemq 17174732Sdavemq tcpu->tcpu_pfn = &tstat_pfn; 17184732Sdavemq tcpu->tcpu_instr = (tstat_instr_t *)tstat_va; 17194732Sdavemq tcpu->tcpu_data = (tstat_data_t *)(tstat_va + TSTAT_INSTR_SIZE + 17204732Sdavemq cpu * TSTAT_DATA_SIZE); 17214732Sdavemq bzero(tcpu->tcpu_data, TSTAT_DATA_SIZE); 17224732Sdavemq tcpu->tcpu_data->tdata_cpuid = cpu; 17234732Sdavemq 17244732Sdavemq /* 17254732Sdavemq * Now that we have all of the instruction and data pages allocated, 17264732Sdavemq * make the trap table from scratch. It should be done only once 17274732Sdavemq * as it is shared by all CPUs. 17284732Sdavemq */ 17294732Sdavemq if (!tstat_traptab_initialized) 17304732Sdavemq trapstat_make_traptab(tcpu); 17314732Sdavemq 17324732Sdavemq if (tstat_options & TSTAT_OPT_TLBDATA) { 17334732Sdavemq /* 17344732Sdavemq * TLB Statistics have been specified; set up the I- and D-TLB 17354732Sdavemq * entries and corresponding TLB return entries. 17364732Sdavemq */ 17374732Sdavemq if (!tstat_traptab_initialized) { 17384732Sdavemq if (tstat_fast_tlbstat) { 17394732Sdavemq trapstat_tlbent(tcpu, TSTAT_ENT_IMMUMISS); 17404732Sdavemq trapstat_tlbent(tcpu, TSTAT_ENT_DMMUMISS); 17414732Sdavemq } else { 17424732Sdavemq trapstat_tlbent(tcpu, TSTAT_ENT_ITLBMISS); 17434732Sdavemq trapstat_tlbent(tcpu, TSTAT_ENT_DTLBMISS); 17444732Sdavemq } 17451050Sgirish } 17460Sstevel@tonic-gate } 17474732Sdavemq tstat_traptab_initialized = 1; 17484732Sdavemq #endif /* sun4v */ 17490Sstevel@tonic-gate 17500Sstevel@tonic-gate tcpu->tcpu_flags |= TSTAT_CPU_ALLOCATED; 17510Sstevel@tonic-gate 17520Sstevel@tonic-gate /* 17530Sstevel@tonic-gate * Finally, get the target CPU to load the locked pages into its TLBs. 
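 * Because a TLB is private to its CPU, these locked translations must be
 * entered by the target CPU itself; the cross call simply runs
 * trapstat_load_tlb() in that CPU's context rather than in ours.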
17540Sstevel@tonic-gate */
17550Sstevel@tonic-gate xc_one(cpu, (xcfunc_t *)trapstat_load_tlb, 0, 0);
17560Sstevel@tonic-gate }
17570Sstevel@tonic-gate
17580Sstevel@tonic-gate static void
17590Sstevel@tonic-gate trapstat_teardown(processorid_t cpu)
17600Sstevel@tonic-gate {
17610Sstevel@tonic-gate tstat_percpu_t *tcpu = &tstat_percpu[cpu];
17620Sstevel@tonic-gate #ifndef sun4v
17630Sstevel@tonic-gate int i;
17640Sstevel@tonic-gate #endif
17650Sstevel@tonic-gate caddr_t va = tcpu->tcpu_vabase;
17660Sstevel@tonic-gate
17670Sstevel@tonic-gate ASSERT(tcpu->tcpu_pfn != NULL);
17680Sstevel@tonic-gate ASSERT(tcpu->tcpu_instr != NULL);
17690Sstevel@tonic-gate ASSERT(tcpu->tcpu_data != NULL);
17700Sstevel@tonic-gate ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED);
17710Sstevel@tonic-gate ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
17720Sstevel@tonic-gate ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED));
17730Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
17740Sstevel@tonic-gate ASSERT(MUTEX_HELD(&tstat_lock));
17750Sstevel@tonic-gate
17760Sstevel@tonic-gate #ifndef sun4v
17770Sstevel@tonic-gate vmem_free(tstat_arena, tcpu->tcpu_pfn, tstat_total_pages);
17780Sstevel@tonic-gate vmem_free(tstat_arena, tcpu->tcpu_instr, TSTAT_INSTR_SIZE);
17790Sstevel@tonic-gate vmem_free(tstat_arena, tcpu->tcpu_data, tstat_data_size);
17800Sstevel@tonic-gate
17810Sstevel@tonic-gate for (i = 0; i < tstat_total_pages; i++, va += MMU_PAGESIZE) {
17822241Shuah xt_one(cpu, vtag_flushpage_tl1, (uint64_t)va,
17832241Shuah (uint64_t)ksfmmup);
17840Sstevel@tonic-gate }
17850Sstevel@tonic-gate #else
17860Sstevel@tonic-gate xt_one(cpu, vtag_unmap_perm_tl1, (uint64_t)va, KCONTEXT);
17870Sstevel@tonic-gate #endif
17880Sstevel@tonic-gate
17890Sstevel@tonic-gate tcpu->tcpu_pfn = NULL;
17900Sstevel@tonic-gate tcpu->tcpu_instr = NULL;
17910Sstevel@tonic-gate tcpu->tcpu_data = NULL;
17920Sstevel@tonic-gate tcpu->tcpu_flags &= ~TSTAT_CPU_ALLOCATED;
17930Sstevel@tonic-gate }
17940Sstevel@tonic-gate
17950Sstevel@tonic-gate static int
17960Sstevel@tonic-gate trapstat_go()
17970Sstevel@tonic-gate {
17980Sstevel@tonic-gate cpu_t *cp;
17990Sstevel@tonic-gate
18000Sstevel@tonic-gate mutex_enter(&cpu_lock);
18010Sstevel@tonic-gate mutex_enter(&tstat_lock);
18020Sstevel@tonic-gate
18030Sstevel@tonic-gate if (tstat_running) {
18040Sstevel@tonic-gate mutex_exit(&tstat_lock);
18050Sstevel@tonic-gate mutex_exit(&cpu_lock);
18060Sstevel@tonic-gate return (EBUSY);
18070Sstevel@tonic-gate }
18080Sstevel@tonic-gate
18090Sstevel@tonic-gate #ifdef sun4v
18100Sstevel@tonic-gate /*
18111050Sgirish * Allocate a large page to hold the interposing tables.
18120Sstevel@tonic-gate */
18130Sstevel@tonic-gate tstat_va = contig_mem_alloc(MMU_PAGESIZE4M);
18140Sstevel@tonic-gate tstat_pfn = va_to_pfn(tstat_va);
18154204Sha137994 if (tstat_pfn == PFN_INVALID) {
18164204Sha137994 mutex_exit(&tstat_lock);
18174204Sha137994 mutex_exit(&cpu_lock);
18180Sstevel@tonic-gate return (EAGAIN);
18194204Sha137994 }
18201050Sgirish
18211050Sgirish /*
18221050Sgirish * For detailed TLB statistics, invoke the CPU-specific interface
18231050Sgirish * to see if it supports a low-overhead interface to collect
18241050Sgirish * TSB hit statistics. If so, set the tstat_fast_tlbstat flag
18251050Sgirish * to reflect that.
18261050Sgirish */ 18271050Sgirish if (tstat_options & TSTAT_OPT_TLBDATA) { 18281050Sgirish int error; 18291050Sgirish 18304732Sdavemq tstat_fast_tlbstat = B_FALSE; 18311050Sgirish error = cpu_trapstat_conf(CPU_TSTATCONF_INIT); 18321050Sgirish if (error == 0) 18331050Sgirish tstat_fast_tlbstat = B_TRUE; 18341050Sgirish else if (error != ENOTSUP) { 18351050Sgirish contig_mem_free(tstat_va, MMU_PAGESIZE4M); 18364204Sha137994 mutex_exit(&tstat_lock); 18374204Sha137994 mutex_exit(&cpu_lock); 18381050Sgirish return (error); 18391050Sgirish } 18400Sstevel@tonic-gate } 18414732Sdavemq #endif /* sun4v */ 18420Sstevel@tonic-gate 18430Sstevel@tonic-gate /* 18440Sstevel@tonic-gate * First, perform any necessary hot patching. 18450Sstevel@tonic-gate */ 18460Sstevel@tonic-gate trapstat_hotpatch(); 18470Sstevel@tonic-gate 18480Sstevel@tonic-gate /* 18490Sstevel@tonic-gate * Allocate the resources we'll need to measure probe effect. 18500Sstevel@tonic-gate */ 18510Sstevel@tonic-gate trapstat_probe_alloc(); 18520Sstevel@tonic-gate 18530Sstevel@tonic-gate 18540Sstevel@tonic-gate cp = cpu_list; 18550Sstevel@tonic-gate do { 18560Sstevel@tonic-gate if (!(tstat_percpu[cp->cpu_id].tcpu_flags & TSTAT_CPU_SELECTED)) 18570Sstevel@tonic-gate continue; 18580Sstevel@tonic-gate 18590Sstevel@tonic-gate trapstat_setup(cp->cpu_id); 18600Sstevel@tonic-gate 18610Sstevel@tonic-gate /* 18620Sstevel@tonic-gate * Note that due to trapstat_probe()'s use of global data, 18630Sstevel@tonic-gate * we determine the probe effect on each CPU serially instead 18640Sstevel@tonic-gate * of in parallel with an xc_all(). 18650Sstevel@tonic-gate */ 18660Sstevel@tonic-gate xc_one(cp->cpu_id, (xcfunc_t *)trapstat_probe, 0, 0); 18670Sstevel@tonic-gate } while ((cp = cp->cpu_next) != cpu_list); 18680Sstevel@tonic-gate 18690Sstevel@tonic-gate xc_all((xcfunc_t *)trapstat_enable, 0, 0); 18700Sstevel@tonic-gate 18710Sstevel@tonic-gate trapstat_probe_free(); 18720Sstevel@tonic-gate tstat_running = 1; 18730Sstevel@tonic-gate mutex_exit(&tstat_lock); 18740Sstevel@tonic-gate mutex_exit(&cpu_lock); 18750Sstevel@tonic-gate 18760Sstevel@tonic-gate return (0); 18770Sstevel@tonic-gate } 18780Sstevel@tonic-gate 18790Sstevel@tonic-gate static int 18800Sstevel@tonic-gate trapstat_stop() 18810Sstevel@tonic-gate { 18820Sstevel@tonic-gate int i; 18830Sstevel@tonic-gate 18840Sstevel@tonic-gate mutex_enter(&cpu_lock); 18850Sstevel@tonic-gate mutex_enter(&tstat_lock); 18860Sstevel@tonic-gate if (!tstat_running) { 18870Sstevel@tonic-gate mutex_exit(&tstat_lock); 18880Sstevel@tonic-gate mutex_exit(&cpu_lock); 18890Sstevel@tonic-gate return (ENXIO); 18900Sstevel@tonic-gate } 18910Sstevel@tonic-gate 18920Sstevel@tonic-gate xc_all((xcfunc_t *)trapstat_disable, 0, 0); 18930Sstevel@tonic-gate 18940Sstevel@tonic-gate for (i = 0; i <= max_cpuid; i++) { 18950Sstevel@tonic-gate if (tstat_percpu[i].tcpu_flags & TSTAT_CPU_ALLOCATED) 18960Sstevel@tonic-gate trapstat_teardown(i); 18970Sstevel@tonic-gate } 18980Sstevel@tonic-gate 18990Sstevel@tonic-gate #ifdef sun4v 19004732Sdavemq tstat_traptab_initialized = 0; 19011050Sgirish if (tstat_options & TSTAT_OPT_TLBDATA) 19021050Sgirish cpu_trapstat_conf(CPU_TSTATCONF_FINI); 19030Sstevel@tonic-gate contig_mem_free(tstat_va, MMU_PAGESIZE4M); 19040Sstevel@tonic-gate #endif 19050Sstevel@tonic-gate trapstat_hotpatch(); 19060Sstevel@tonic-gate tstat_running = 0; 19070Sstevel@tonic-gate mutex_exit(&tstat_lock); 19080Sstevel@tonic-gate mutex_exit(&cpu_lock); 19090Sstevel@tonic-gate 19100Sstevel@tonic-gate return (0); 19110Sstevel@tonic-gate } 
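
/*
 * The ioctl protocol implemented by the routines below is easiest to see
 * from the consumer's side.  The fragment that follows is only an
 * illustrative sketch: it assumes the conventional /dev/trapstat node and
 * that the TSTATIOC_* requests come from <sys/trapstat.h>, the header set
 * is approximate, and error handling and TSTATIOC_READ buffer management
 * (whose size depends on whether TLB statistics were requested) are
 * omitted.
 *
 *	#include <sys/trapstat.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	sample(void)
 *	{
 *		int fd = open("/dev/trapstat", O_RDWR);
 *
 *		(void) ioctl(fd, TSTATIOC_CPU, 0);
 *		(void) ioctl(fd, TSTATIOC_GO);
 *		sleep(1);
 *		(void) ioctl(fd, TSTATIOC_STOP);
 *		return (close(fd));
 *	}
 *
 * The TSTATIOC_CPU call restricts collection to CPU 0, TSTATIOC_GO swings
 * the selected CPUs' %tba onto their interposing tables, and TSTATIOC_STOP
 * (or, as trapstat_close() shows, simply closing the device) restores the
 * underlying trap tables.  Opens are exclusive -- a second open fails with
 * EBUSY -- and CPU or entry selection is likewise rejected with EBUSY while
 * collection is running.  A real consumer would issue TSTATIOC_READ between
 * GO and STOP to snapshot the per-CPU tstat_data_t records.
 */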
19120Sstevel@tonic-gate 19130Sstevel@tonic-gate /* 19140Sstevel@tonic-gate * This is trapstat's DR CPU configuration callback. It's called (with 19150Sstevel@tonic-gate * cpu_lock held) to unconfigure a newly powered-off CPU, or to configure a 19160Sstevel@tonic-gate * powered-off CPU that is to be brought into the system. We need only take 19170Sstevel@tonic-gate * action in the unconfigure case: because a powered-off CPU will have its 19180Sstevel@tonic-gate * trap table restored to KERNELBASE if it is ever powered back on, we must 19190Sstevel@tonic-gate * update the flags to reflect that trapstat is no longer enabled on the 19200Sstevel@tonic-gate * powered-off CPU. Note that this means that a TSTAT_CPU_ENABLED CPU that 19210Sstevel@tonic-gate * is unconfigured/powered off and later powered back on/reconfigured will 19220Sstevel@tonic-gate * _not_ be re-TSTAT_CPU_ENABLED. 19230Sstevel@tonic-gate */ 19240Sstevel@tonic-gate static int 19250Sstevel@tonic-gate trapstat_cpu_setup(cpu_setup_t what, processorid_t cpu) 19260Sstevel@tonic-gate { 19270Sstevel@tonic-gate tstat_percpu_t *tcpu = &tstat_percpu[cpu]; 19280Sstevel@tonic-gate 19290Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock)); 19300Sstevel@tonic-gate mutex_enter(&tstat_lock); 19310Sstevel@tonic-gate 19320Sstevel@tonic-gate if (!tstat_running) { 19330Sstevel@tonic-gate mutex_exit(&tstat_lock); 19340Sstevel@tonic-gate return (0); 19350Sstevel@tonic-gate } 19360Sstevel@tonic-gate 19370Sstevel@tonic-gate switch (what) { 19380Sstevel@tonic-gate case CPU_CONFIG: 19390Sstevel@tonic-gate ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED)); 19400Sstevel@tonic-gate break; 19410Sstevel@tonic-gate 19420Sstevel@tonic-gate case CPU_UNCONFIG: 19431991Sheppo if (tcpu->tcpu_flags & TSTAT_CPU_ENABLED) { 19440Sstevel@tonic-gate tcpu->tcpu_flags &= ~TSTAT_CPU_ENABLED; 19451991Sheppo #ifdef sun4v 19461991Sheppo /* 19471991Sheppo * A power-off, causes the cpu mondo queues to be 19481991Sheppo * unconfigured on sun4v. Since we can't teardown 19491991Sheppo * trapstat's mappings on the cpu that is going away, 19501991Sheppo * we simply mark it as not allocated. This will 19511991Sheppo * prevent a teardown on a cpu with the same cpu id 19521991Sheppo * that might have been added while trapstat is running. 19531991Sheppo */ 19541991Sheppo if (tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED) { 19551991Sheppo tcpu->tcpu_pfn = NULL; 19561991Sheppo tcpu->tcpu_instr = NULL; 19571991Sheppo tcpu->tcpu_data = NULL; 19581991Sheppo tcpu->tcpu_flags &= ~TSTAT_CPU_ALLOCATED; 19591991Sheppo } 19601991Sheppo #endif 19611991Sheppo } 19620Sstevel@tonic-gate break; 19630Sstevel@tonic-gate 19640Sstevel@tonic-gate default: 19650Sstevel@tonic-gate break; 19660Sstevel@tonic-gate } 19670Sstevel@tonic-gate 19680Sstevel@tonic-gate mutex_exit(&tstat_lock); 19690Sstevel@tonic-gate return (0); 19700Sstevel@tonic-gate } 19710Sstevel@tonic-gate 19720Sstevel@tonic-gate /* 19730Sstevel@tonic-gate * This is called before a CPR suspend and after a CPR resume. We don't have 19740Sstevel@tonic-gate * anything to do before a suspend, but after a restart we must restore the 19750Sstevel@tonic-gate * trap table to be our interposing trap table. However, we don't actually 19760Sstevel@tonic-gate * know whether or not the CPUs have been powered off -- this routine may be 19770Sstevel@tonic-gate * called while restoring from a failed CPR suspend. We thus run through each 19780Sstevel@tonic-gate * TSTAT_CPU_ENABLED CPU, and explicitly destroy and reestablish its 19790Sstevel@tonic-gate * interposing trap table. 
This assures that our state is correct regardless 19800Sstevel@tonic-gate * of whether or not the CPU has been newly powered on. 19810Sstevel@tonic-gate */ 19820Sstevel@tonic-gate /*ARGSUSED*/ 19830Sstevel@tonic-gate static boolean_t 19840Sstevel@tonic-gate trapstat_cpr(void *arg, int code) 19850Sstevel@tonic-gate { 19860Sstevel@tonic-gate cpu_t *cp; 19870Sstevel@tonic-gate 19880Sstevel@tonic-gate if (code == CB_CODE_CPR_CHKPT) 19890Sstevel@tonic-gate return (B_TRUE); 19900Sstevel@tonic-gate 19910Sstevel@tonic-gate ASSERT(code == CB_CODE_CPR_RESUME); 19920Sstevel@tonic-gate 19930Sstevel@tonic-gate mutex_enter(&cpu_lock); 19940Sstevel@tonic-gate mutex_enter(&tstat_lock); 19950Sstevel@tonic-gate 19960Sstevel@tonic-gate if (!tstat_running) { 19970Sstevel@tonic-gate mutex_exit(&tstat_lock); 19980Sstevel@tonic-gate mutex_exit(&cpu_lock); 19990Sstevel@tonic-gate return (B_TRUE); 20000Sstevel@tonic-gate } 20010Sstevel@tonic-gate 20020Sstevel@tonic-gate cp = cpu_list; 20030Sstevel@tonic-gate do { 20040Sstevel@tonic-gate tstat_percpu_t *tcpu = &tstat_percpu[cp->cpu_id]; 20050Sstevel@tonic-gate 20060Sstevel@tonic-gate if (!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED)) 20070Sstevel@tonic-gate continue; 20080Sstevel@tonic-gate 20090Sstevel@tonic-gate ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED); 20100Sstevel@tonic-gate ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED); 20110Sstevel@tonic-gate 20120Sstevel@tonic-gate xc_one(cp->cpu_id, (xcfunc_t *)trapstat_disable, 0, 0); 20130Sstevel@tonic-gate ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED)); 20140Sstevel@tonic-gate 20150Sstevel@tonic-gate /* 20160Sstevel@tonic-gate * Preserve this CPU's data in tstat_buffer and rip down its 20170Sstevel@tonic-gate * interposing trap table. 20180Sstevel@tonic-gate */ 20190Sstevel@tonic-gate bcopy(tcpu->tcpu_data, tstat_buffer, tstat_data_t_size); 20200Sstevel@tonic-gate trapstat_teardown(cp->cpu_id); 20210Sstevel@tonic-gate ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED)); 20220Sstevel@tonic-gate 20230Sstevel@tonic-gate /* 20240Sstevel@tonic-gate * Reestablish the interposing trap table and restore the old 20250Sstevel@tonic-gate * data. 20260Sstevel@tonic-gate */ 20270Sstevel@tonic-gate trapstat_setup(cp->cpu_id); 20280Sstevel@tonic-gate ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED); 20290Sstevel@tonic-gate bcopy(tstat_buffer, tcpu->tcpu_data, tstat_data_t_size); 20300Sstevel@tonic-gate 20310Sstevel@tonic-gate xc_one(cp->cpu_id, (xcfunc_t *)trapstat_enable, 0, 0); 20320Sstevel@tonic-gate } while ((cp = cp->cpu_next) != cpu_list); 20330Sstevel@tonic-gate 20340Sstevel@tonic-gate mutex_exit(&tstat_lock); 20350Sstevel@tonic-gate mutex_exit(&cpu_lock); 20360Sstevel@tonic-gate 20370Sstevel@tonic-gate return (B_TRUE); 20380Sstevel@tonic-gate } 20390Sstevel@tonic-gate 20400Sstevel@tonic-gate /*ARGSUSED*/ 20410Sstevel@tonic-gate static int 20420Sstevel@tonic-gate trapstat_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 20430Sstevel@tonic-gate { 20440Sstevel@tonic-gate int i; 20450Sstevel@tonic-gate 20460Sstevel@tonic-gate mutex_enter(&cpu_lock); 20470Sstevel@tonic-gate mutex_enter(&tstat_lock); 20480Sstevel@tonic-gate if (tstat_open != 0) { 20490Sstevel@tonic-gate mutex_exit(&tstat_lock); 20500Sstevel@tonic-gate mutex_exit(&cpu_lock); 20510Sstevel@tonic-gate return (EBUSY); 20520Sstevel@tonic-gate } 20530Sstevel@tonic-gate 20540Sstevel@tonic-gate /* 20550Sstevel@tonic-gate * Register this in open() rather than in attach() to prevent deadlock 20560Sstevel@tonic-gate * with DR code. 
During attach, I/O device tree locks are grabbed 20570Sstevel@tonic-gate * before trapstat_attach() is invoked - registering in attach 20580Sstevel@tonic-gate * will result in the lock order: device tree lock, cpu_lock. 20590Sstevel@tonic-gate * DR code however requires that cpu_lock be acquired before 20600Sstevel@tonic-gate * device tree locks. 20610Sstevel@tonic-gate */ 20620Sstevel@tonic-gate ASSERT(!tstat_running); 20630Sstevel@tonic-gate register_cpu_setup_func((cpu_setup_func_t *)trapstat_cpu_setup, NULL); 20640Sstevel@tonic-gate 20650Sstevel@tonic-gate /* 20660Sstevel@tonic-gate * Clear all options. And until specific CPUs are specified, we'll 20670Sstevel@tonic-gate * mark all CPUs as selected. 20680Sstevel@tonic-gate */ 20690Sstevel@tonic-gate tstat_options = 0; 20700Sstevel@tonic-gate 20710Sstevel@tonic-gate for (i = 0; i <= max_cpuid; i++) 20720Sstevel@tonic-gate tstat_percpu[i].tcpu_flags |= TSTAT_CPU_SELECTED; 20730Sstevel@tonic-gate 20740Sstevel@tonic-gate /* 20750Sstevel@tonic-gate * By default, all traps at TL=0 are enabled. Traps at TL>0 must 20760Sstevel@tonic-gate * be disabled. 20770Sstevel@tonic-gate */ 20780Sstevel@tonic-gate for (i = 0; i < TSTAT_TOTAL_NENT; i++) 20790Sstevel@tonic-gate tstat_enabled[i] = i < TSTAT_NENT ? 1 : 0; 20800Sstevel@tonic-gate 20810Sstevel@tonic-gate tstat_open = 1; 20820Sstevel@tonic-gate mutex_exit(&tstat_lock); 20830Sstevel@tonic-gate mutex_exit(&cpu_lock); 20840Sstevel@tonic-gate 20850Sstevel@tonic-gate return (0); 20860Sstevel@tonic-gate } 20870Sstevel@tonic-gate 20880Sstevel@tonic-gate /*ARGSUSED*/ 20890Sstevel@tonic-gate static int 20900Sstevel@tonic-gate trapstat_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 20910Sstevel@tonic-gate { 20920Sstevel@tonic-gate (void) trapstat_stop(); 20930Sstevel@tonic-gate 20940Sstevel@tonic-gate ASSERT(!tstat_running); 20950Sstevel@tonic-gate 20960Sstevel@tonic-gate mutex_enter(&cpu_lock); 20970Sstevel@tonic-gate unregister_cpu_setup_func((cpu_setup_func_t *)trapstat_cpu_setup, NULL); 20980Sstevel@tonic-gate mutex_exit(&cpu_lock); 20990Sstevel@tonic-gate 21000Sstevel@tonic-gate tstat_open = 0; 21010Sstevel@tonic-gate return (DDI_SUCCESS); 21020Sstevel@tonic-gate } 21030Sstevel@tonic-gate 21040Sstevel@tonic-gate static int 21050Sstevel@tonic-gate trapstat_option(int option) 21060Sstevel@tonic-gate { 21070Sstevel@tonic-gate mutex_enter(&tstat_lock); 21080Sstevel@tonic-gate 21090Sstevel@tonic-gate if (tstat_running) { 21100Sstevel@tonic-gate mutex_exit(&tstat_lock); 21110Sstevel@tonic-gate return (EBUSY); 21120Sstevel@tonic-gate } 21130Sstevel@tonic-gate 21140Sstevel@tonic-gate tstat_options |= option; 21150Sstevel@tonic-gate mutex_exit(&tstat_lock); 21160Sstevel@tonic-gate 21170Sstevel@tonic-gate return (0); 21180Sstevel@tonic-gate } 21190Sstevel@tonic-gate 21200Sstevel@tonic-gate /*ARGSUSED*/ 21210Sstevel@tonic-gate static int 21220Sstevel@tonic-gate trapstat_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *crd, int *rval) 21230Sstevel@tonic-gate { 21240Sstevel@tonic-gate int i, j, out; 21250Sstevel@tonic-gate size_t dsize; 21260Sstevel@tonic-gate 21270Sstevel@tonic-gate switch (cmd) { 21280Sstevel@tonic-gate case TSTATIOC_GO: 21290Sstevel@tonic-gate return (trapstat_go()); 21300Sstevel@tonic-gate 21310Sstevel@tonic-gate case TSTATIOC_NOGO: 21320Sstevel@tonic-gate return (trapstat_option(TSTAT_OPT_NOGO)); 21330Sstevel@tonic-gate 21340Sstevel@tonic-gate case TSTATIOC_STOP: 21350Sstevel@tonic-gate return (trapstat_stop()); 21360Sstevel@tonic-gate 21370Sstevel@tonic-gate case TSTATIOC_CPU: 
	case TSTATIOC_CPU:
		if (arg < 0 || arg > max_cpuid)
			return (EINVAL);
		/*FALLTHROUGH*/

	case TSTATIOC_NOCPU:
		mutex_enter(&tstat_lock);

		if (tstat_running) {
			mutex_exit(&tstat_lock);
			return (EBUSY);
		}

		/*
		 * If this is the first CPU to be specified (or if we are
		 * being asked to explicitly de-select CPUs), disable all CPUs.
		 */
		if (!(tstat_options & TSTAT_OPT_CPU) || cmd == TSTATIOC_NOCPU) {
			tstat_options |= TSTAT_OPT_CPU;

			for (i = 0; i <= max_cpuid; i++) {
				tstat_percpu_t *tcpu = &tstat_percpu[i];

				ASSERT(cmd == TSTATIOC_NOCPU ||
				    (tcpu->tcpu_flags & TSTAT_CPU_SELECTED));
				tcpu->tcpu_flags &= ~TSTAT_CPU_SELECTED;
			}
		}

		if (cmd == TSTATIOC_CPU)
			tstat_percpu[arg].tcpu_flags |= TSTAT_CPU_SELECTED;

		mutex_exit(&tstat_lock);

		return (0);
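
	/*
	 * TSTATIOC_ENTRY narrows collection to explicitly named TL=0 trap
	 * table entries in the same accumulating fashion as TSTATIOC_CPU
	 * above:  the first explicit entry disables every TL=0 entry and
	 * then enables only the one requested; TSTATIOC_NOENTRY disables
	 * them all.  As a purely illustrative example, a consumer that only
	 * cared about data-TLB miss activity might pass the trap type of
	 * the fast data access MMU miss handler (0x68 on UltraSPARC) as
	 * the argument.
	 */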
	case TSTATIOC_ENTRY:
		mutex_enter(&tstat_lock);

		if (tstat_running) {
			mutex_exit(&tstat_lock);
			return (EBUSY);
		}

		if (arg >= TSTAT_NENT || arg < 0) {
			mutex_exit(&tstat_lock);
			return (EINVAL);
		}

		if (!(tstat_options & TSTAT_OPT_ENTRY)) {
			/*
			 * If this is the first entry that we are explicitly
			 * enabling, explicitly disable every TL=0 entry.
			 */
			for (i = 0; i < TSTAT_NENT; i++)
				tstat_enabled[i] = 0;

			tstat_options |= TSTAT_OPT_ENTRY;
		}

		tstat_enabled[arg] = 1;
		mutex_exit(&tstat_lock);
		return (0);

	case TSTATIOC_NOENTRY:
		mutex_enter(&tstat_lock);

		if (tstat_running) {
			mutex_exit(&tstat_lock);
			return (EBUSY);
		}

		for (i = 0; i < TSTAT_NENT; i++)
			tstat_enabled[i] = 0;

		mutex_exit(&tstat_lock);
		return (0);

	case TSTATIOC_READ:
		mutex_enter(&tstat_lock);

		if (tstat_options & TSTAT_OPT_TLBDATA) {
			dsize = tstat_data_t_exported_size;
		} else {
			dsize = sizeof (tstat_data_t);
		}

		for (i = 0, out = 0; i <= max_cpuid; i++) {
			tstat_percpu_t *tcpu = &tstat_percpu[i];

			if (!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED))
				continue;

			ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED);
			ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);

			tstat_buffer->tdata_cpuid = -1;
			xc_one(i, (xcfunc_t *)trapstat_snapshot, 0, 0);

			if (tstat_buffer->tdata_cpuid == -1) {
				/*
				 * This CPU is not currently responding to
				 * cross calls; we have caught it while it is
				 * being unconfigured.  We'll drop tstat_lock
				 * and pick up and drop cpu_lock.  By the
				 * time we acquire cpu_lock, the DR operation
				 * will appear consistent and we can assert
				 * that trapstat_cpu_setup() has cleared
				 * TSTAT_CPU_ENABLED.
				 */
				mutex_exit(&tstat_lock);
				mutex_enter(&cpu_lock);
				mutex_exit(&cpu_lock);
				mutex_enter(&tstat_lock);
				ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED));
				continue;
			}

			/*
			 * Need to compensate for the difference between page
			 * sizes exported to users and page sizes available
			 * within the kernel.
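			 * For example (purely illustratively, as the actual
			 * lists of page sizes are platform-dependent), if
			 * the kernel recognizes {8K, 64K, 512K, 4M} but
			 * only {8K, 64K, 4M} are exported to users, then
			 * USERSZC_2_SZC(2) is 3 and the bcopy() below
			 * slides the 4M data down into the slot that
			 * user-level indexing expects.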
			 */
			if ((tstat_options & TSTAT_OPT_TLBDATA) &&
			    (tstat_pgszs != tstat_user_pgszs)) {
				tstat_pgszdata_t *tp;
				uint_t szc;

				tp = &tstat_buffer->tdata_pgsz[0];
				for (j = 0; j < tstat_user_pgszs; j++) {
					if ((szc = USERSZC_2_SZC(j)) != j) {
						bcopy(&tp[szc], &tp[j],
						    sizeof (tstat_pgszdata_t));
					}
				}
			}

			if (copyout(tstat_buffer, (void *)arg, dsize) != 0) {
				mutex_exit(&tstat_lock);
				return (EFAULT);
			}

			out++;
			arg += dsize;
		}

		if (out != max_cpuid + 1) {
			processorid_t cpuid = -1;
			arg += offsetof(tstat_data_t, tdata_cpuid);

			if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) {
				mutex_exit(&tstat_lock);
				return (EFAULT);
			}
		}

		mutex_exit(&tstat_lock);

		return (0);

	case TSTATIOC_TLBDATA:
		return (trapstat_option(TSTAT_OPT_TLBDATA));

	default:
		break;
	}

	return (ENOTTY);
}

/*ARGSUSED*/
static int
trapstat_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)tstat_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}

static int
trapstat_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(devi, "trapstat", S_IFCHR,
	    0, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	tstat_devi = devi;
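
	/*
	 * tstat_data_t embeds a single tstat_pgszdata_t (tdata_pgsz[0]),
	 * so a full per-CPU record is the base structure plus one
	 * additional tstat_pgszdata_t for each page size beyond the first;
	 * hence the (tstat_pgszs - 1) and (tstat_user_pgszs - 1) terms
	 * below.  The exported size differs from the internal size only
	 * when the kernel supports page sizes that are not exported to
	 * users (see the compensation in the TSTATIOC_READ case above).
	 */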
	tstat_pgszs = page_num_pagesizes();
	tstat_user_pgszs = page_num_user_pagesizes(0);
	tstat_data_t_size = sizeof (tstat_data_t) +
	    (tstat_pgszs - 1) * sizeof (tstat_pgszdata_t);
	tstat_data_t_exported_size = sizeof (tstat_data_t) +
	    (tstat_user_pgszs - 1) * sizeof (tstat_pgszdata_t);
#ifndef sun4v
	tstat_data_pages = (tstat_data_t_size >> MMU_PAGESHIFT) + 1;
	tstat_total_pages = TSTAT_INSTR_PAGES + tstat_data_pages;
	tstat_data_size = tstat_data_pages * MMU_PAGESIZE;
	tstat_total_size = TSTAT_INSTR_SIZE + tstat_data_size;
#else
	ASSERT(tstat_data_t_size <= TSTAT_DATA_SIZE);
#endif

	tstat_percpu = kmem_zalloc((max_cpuid + 1) *
	    sizeof (tstat_percpu_t), KM_SLEEP);

	/*
	 * Create our own arena backed by segkmem to assure a source of
	 * MMU_PAGESIZE-aligned allocations.  We allocate out of the
	 * heap32_arena to assure that we can address the allocated memory with
	 * a single sethi/simm13 pair in the interposing trap table entries.
	 */
	tstat_arena = vmem_create("trapstat", NULL, 0, MMU_PAGESIZE,
	    segkmem_alloc_permanent, segkmem_free, heap32_arena, 0, VM_SLEEP);
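
	/*
	 * tstat_enabled[] holds the per-trap-table-entry enable flags that
	 * TSTATIOC_ENTRY and TSTATIOC_NOENTRY manipulate above, and
	 * tstat_buffer is the staging area that trapstat_snapshot() fills
	 * in on the target CPU before the TSTATIOC_READ case copies it out
	 * to the consumer.
	 */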
	tstat_enabled = kmem_alloc(TSTAT_TOTAL_NENT * sizeof (int), KM_SLEEP);
	tstat_buffer = kmem_alloc(tstat_data_t_size, KM_SLEEP);

	/*
	 * CB_CL_CPR_POST_USER is the class that executes from cpr_resume()
	 * after user threads can be restarted.  By executing in this class,
	 * we are assured of the availability of system services needed to
	 * resume trapstat (specifically, we are assured that all CPUs are
	 * restarted and responding to cross calls).
	 */
	tstat_cprcb =
	    callb_add(trapstat_cpr, NULL, CB_CL_CPR_POST_USER, "trapstat");

	return (DDI_SUCCESS);
}

static int
trapstat_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int rval;

	ASSERT(devi == tstat_devi);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	ASSERT(!tstat_running);

	rval = callb_delete(tstat_cprcb);
	ASSERT(rval == 0);

	kmem_free(tstat_buffer, tstat_data_t_size);
	kmem_free(tstat_enabled, TSTAT_TOTAL_NENT * sizeof (int));
	vmem_destroy(tstat_arena);
	kmem_free(tstat_percpu, (max_cpuid + 1) * sizeof (tstat_percpu_t));
	ddi_remove_minor_node(devi, NULL);

	return (DDI_SUCCESS);
}

/*
 * Configuration data structures
 */
static struct cb_ops trapstat_cb_ops = {
	trapstat_open,		/* open */
	trapstat_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	trapstat_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_MP | D_NEW		/* Driver compatibility flag */
};

static struct dev_ops trapstat_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	trapstat_info,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	trapstat_attach,	/* attach */
	trapstat_detach,	/* detach */
	nulldev,		/* reset */
	&trapstat_cb_ops,	/* cb_ops */
	(struct bus_ops *)0,	/* bus_ops */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};
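
/*
 * The remaining structures are the standard loadable-module linkage:
 * modldrv points at trapstat_ops (and, through it, trapstat_cb_ops),
 * modlinkage points at modldrv, and _init(), _fini() and _info() simply
 * hand modlinkage to mod_install(), mod_remove() and mod_info().
 */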
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	"Trap Statistics",	/* name of module */
	&trapstat_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
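
/*
 * Though the ioctl interface above is normally driven by the trapstat(1M)
 * command, the implied protocol is worth summarizing:  a consumer opens
 * the character device whose minor node ("trapstat") is created in
 * trapstat_attach(), optionally narrows collection with TSTATIOC_CPU,
 * TSTATIOC_ENTRY and/or TSTATIOC_TLBDATA, starts collection with
 * TSTATIOC_GO, and then periodically snapshots the per-CPU data with
 * TSTATIOC_READ, passing the address of a buffer large enough for one
 * record per possible CPU; if fewer records are returned, the list is
 * terminated by a record whose tdata_cpuid is -1.  Collection is halted
 * by TSTATIOC_STOP (or implicitly on close).  As a purely illustrative
 * sketch (not the actual trapstat(1M) implementation; the device path and
 * the sizing of buf are assumptions left to the consumer):
 *
 *	fd = open("/dev/trapstat", O_RDONLY);
 *	(void) ioctl(fd, TSTATIOC_GO);
 *	for (;;) {
 *		(void) ioctl(fd, TSTATIOC_READ, buf);
 *		...
 *	}
 *
 * See the TSTATIOC_READ case in trapstat_ioctl() for the exact per-record
 * size and termination convention.
 */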