xref: /onnv-gate/usr/src/uts/sun4/io/trapstat.c (revision 0:68f95e015346)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/cpu_module.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#include <sys/machsystm.h>
#include <sys/machasi.h>
#include <sys/sysmacros.h>
#include <sys/callb.h>
#include <sys/archsystm.h>
#include <sys/trapstat.h>
#ifdef sun4v
#include <sys/hypervisor_api.h>
#endif

/* BEGIN CSTYLED */
/*
 * trapstat:  Trap Statistics through Dynamic Trap Table Interposition
 * -------------------------------------------------------------------
 *
 * Motivation and Overview
 *
 * Despite being a fundamental indicator of system behavior, there has
 * historically been very little insight provided into the frequency and cost
 * of machine-specific traps.  The lack of insight has been especially acute
 * on UltraSPARC microprocessors:  because these microprocessors handle TLB
 * misses as software traps, the frequency and duration of traps play a
 * decisive role in the performance of the memory system.  As applications have
 * increasingly outstripped TLB reach, this has become increasingly true.
 *
 * Part of the difficulty of observing trap behavior is that the trap handlers
 * are so frequently called (e.g. millions of times per second) that any
 * permanently enabled instrumentation would induce an unacceptable performance
 * degradation.  Thus, it is a constraint on any trap observability
 * infrastructure that it have no probe effect when not explicitly enabled.
 *
 * The basic idea, then, is to create an interposing trap table in which each
 * entry increments a per-trap, in-memory counter and then jumps to the actual,
 * underlying trap table entry.  To enable trapstat, we atomically write to the
 * trap base address (%tba) register to point to our interposing trap table.
 * (Note that per-CPU statistics fall out by creating a different trap table
 * for each CPU.)
 *
 * Implementation Details
 *
 * While the idea is straightforward, a nuance of SPARC V9 slightly
 * complicates the implementation.  Unlike its predecessors, SPARC V9 supports
 * the notion of nested traps.  The trap level is kept in the TL register:
 * during normal operation it is 0; when a trap is taken, the TL register is
 * incremented by 1.  To aid system software, SPARC V9 breaks the trap table
 * into two halves:  the lower half contains the trap handlers for traps taken
 * when TL is 0; the upper half contains the trap handlers for traps taken
 * when TL is greater than 0.  Each half is further subdivided into two
 * subsequent halves:  the lower half contains the trap handlers for traps
 * other than those induced by the trap instruction (Tcc variants); the upper
 * half contains the trap handlers for traps induced by the trap instruction.
 * This gives a total of four ranges, with each range containing 256 traps:
 *
 *       +--------------------------------+- 3ff
 *       |                                |   .
 *       |     Trap instruction, TL>0     |   .
 *       |                                |   .
 *       |- - - - - - - - - - - - - - - - +- 300
 *       |- - - - - - - - - - - - - - - - +- 2ff
 *       |                                |   .
 *       |   Non-trap instruction, TL>0   |   .
 *       |                                |   .
 *       |- - - - - - - - - - - - - - - - +- 200
 *       |- - - - - - - - - - - - - - - - +- 1ff
 *       |                                |   .
 *       |     Trap instruction, TL=0     |   .
 *       |                                |   .
 *       |- - - - - - - - - - - - - - - - +- 100
 *       |- - - - - - - - - - - - - - - - +- 0ff
 *       |                                |   .
 *       |   Non-trap instruction, TL=0   |   .
 *       |                                |   .
 *       +--------------------------------+- 000
 *
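 * (For concreteness -- an editorial sketch based on the SPARC V9 trap model,
 * with TT denoting the nine-bit trap type within each TL half:  each entry
 * is eight instructions (32 bytes), and the hardware forms the handler
 * address as
 *
 *	handler = %tba | ((TL > 0) ? (1 << 14) : 0) | (TT << 5)
 *
 * so the 0x400 entries drawn above span exactly 32K -- which is why the
 * %tba must be 32K aligned, as noted below.)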
 *
 * Solaris, however, doesn't have reason to support trap instructions when
 * TL>0 (only privileged code may execute at TL>0; not supporting this only
 * constrains our own implementation).  The trap table actually looks like:
 *
 *       +--------------------------------+- 2ff
 *       |                                |   .
 *       |   Non-trap instruction, TL>0   |   .
 *       |                                |   .
 *       |- - - - - - - - - - - - - - - - +- 200
 *       |- - - - - - - - - - - - - - - - +- 1ff
 *       |                                |   .
 *       |     Trap instruction, TL=0     |   .
 *       |                                |   .
 *       |- - - - - - - - - - - - - - - - +- 100
 *       |- - - - - - - - - - - - - - - - +- 0ff
 *       |                                |   .
 *       |   Non-trap instruction, TL=0   |   .
 *       |                                |   .
 *       +--------------------------------+- 000
 *
 * Putatively to aid system software, SPARC V9 has the notion of multiple
 * sets of global registers.  UltraSPARC defines four sets of global
 * registers:
 *
 *    Normal Globals
 *    Alternate Globals (AGs)
 *    MMU Globals (MGs)
 *    Interrupt Globals (IGs)
 *
 * The set of globals in use is controlled by bits in PSTATE; when TL is 0
 * (and PSTATE has not been otherwise explicitly modified), the Normal Globals
 * are in use.  When a trap is issued, PSTATE is modified to point to a set of
 * globals corresponding to the trap type.  Most traps correspond to the
 * Alternate Globals, with a minority corresponding to the MMU Globals, and
 * only the interrupt-vector trap (vector 0x60) corresponding to the Interrupt
 * Globals.  (The complete mapping can be found in the UltraSPARC I&II User's
 * Manual.)
 *
 * Note that the sets of globals are per trap _type_, not per trap _level_.
 * Thus, when executing a TL>0 trap handler, one may not have registers
 * available (for example, both trap-instruction traps and spill traps execute
 * on the alternate globals; if a trap-instruction trap induces a window spill,
 * the window spill handler has no available globals).  For trapstat, this is
 * problematic:  a register is required to transfer control from one arbitrary
 * location (in the interposing trap table) to another (in the actual trap
 * table).
 *
 * We solve this problem by exploiting the trap table's location at the bottom
 * of valid kernel memory (i.e. at KERNELBASE).  We locate the interposing trap
 * tables just below KERNELBASE -- thereby allowing us to use a branch-always
 * instruction (ba) instead of a jump instruction (jmp) to transfer control
 * from the TL>0 entries in the interposing trap table to the TL>0 entries in
 * the actual trap table.  (N.B. while this allows trap table interposition to
 * work, it necessarily limits trapstat to only recording information about
 * TL=0 traps -- there is no way to increment a counter without using a
 * register.)  Diagrammatically:
 *
 *  Actual trap table:
 *
 *       +--------------------------------+- 2ff
 *       |                                |   .
 *       |   Non-trap instruction, TL>0   |   .   <-----------------------+
 *       |                                |   .   <-----------------------|-+
 *       |- - - - - - - - - - - - - - - - +- 200  <-----------------------|-|-+
 *       |- - - - - - - - - - - - - - - - +- 1ff                          | | |
 *       |                                |   .                           | | |
 *       |     Trap instruction, TL=0     |   .   <-----------------+     | | |
 *       |                                |   .   <-----------------|-+   | | |
 *       |- - - - - - - - - - - - - - - - +- 100  <-----------------|-|-+ | | |
 *       |- - - - - - - - - - - - - - - - +- 0ff                    | | | | | |
 *       |                                |   .                     | | | | | |
 *       |   Non-trap instruction, TL=0   |   .   <-----------+     | | | | | |
 *       |                                |   .   <-----------|-+   | | | | | |
 *       +--------------------------------+- 000  <-----------|-|-+ | | | | | |
 *        KERNELBASE                                          | | | | | | | | |
 *                                                            | | | | | | | | |
 *                                                            | | | | | | | | |
 *  Interposing trap table:                                   | | | | | | | | |
 *                                                            | | | | | | | | |
 *       +--------------------------------+- 2ff              | | | | | | | | |
 *       |  ...                           |   .               | | | | | | | | |
 *       |  ...                           |   .               | | | | | | | | |
 *       |  ...                           |   .               | | | | | | | | |
 *       |- - - - - - - - - - - - - - - - +- 203              | | | | | | | | |
 *       |  ba,a                          |      -------------|-|-|-|-|-|-+ | |
 *       |- - - - - - - - - - - - - - - - +- 202              | | | | | |   | |
 *       |  ba,a                          |      -------------|-|-|-|-|-|---+ |
 *       |- - - - - - - - - - - - - - - - +- 201              | | | | | |     |
 *       |  ba,a                          |      -------------|-|-|-|-|-|-----+
 *       |- - - - - - - - - - - - - - - - +- 200              | | | | | |
 *       |  ...                           |   .               | | | | | |
 *       |  ...                           |   .               | | | | | |
 *       |  ...                           |   .               | | | | | |
 *       |- - - - - - - - - - - - - - - - +- 103              | | | | | |
 *       |  (Increment counter)           |                   | | | | | |
 *       |  ba,a                          |      -------------------+ | |
 *       |- - - - - - - - - - - - - - - - +- 102              | | |   | |
 *       |  (Increment counter)           |                   | | |   | |
 *       |  ba,a                          |      ---------------------+ |
 *       |- - - - - - - - - - - - - - - - +- 101              | | |     |
 *       |  (Increment counter)           |                   | | |     |
 *       |  ba,a                          |      -----------------------+
 *       |- - - - - - - - - - - - - - - - +- 100              | | |
 *       |  ...                           |   .               | | |
 *       |  ...                           |   .               | | |
 *       |  ...                           |   .               | | |
 *       |- - - - - - - - - - - - - - - - +- 003              | | |
 *       |  (Increment counter)           |                   | | |
 *       |  ba,a                          |      -------------+ | |
 *       |- - - - - - - - - - - - - - - - +- 002                | |
 *       |  (Increment counter)           |                     | |
 *       |  ba,a                          |      ---------------+ |
 *       |- - - - - - - - - - - - - - - - +- 001                  |
 *       |  (Increment counter)           |                       |
 *       |  ba,a                          |      -----------------+
 *       +--------------------------------+- 000
 *        KERNELBASE - tstat_total_size
 *
 * tstat_total_size is the total size of each interposing trap table --
 * instructions plus data.  It must be true that the interposing trap table
 * (which begins at KERNELBASE - tstat_total_size) lies within the maximum
 * branch displacement of the actual trap table; if each CPU were to consume
 * a disjoint virtual range below KERNELBASE for its trap table, we could
 * support at most (maximum_branch_displacement / tstat_total_size) CPUs.
 * The maximum branch displacement for Bicc variants is just under eight
 * megabytes, and (because the %tba must be 32K aligned) tstat_total_size
 * must be at least 32K; if each CPU were to consume a disjoint virtual
 * range, we would have an unacceptably low upper bound of 256 CPUs.
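 *
 * (Worked out, for concreteness:  a Bicc's 22-bit word displacement reaches
 * just under 2^23 bytes (8 MB) forward; with tstat_total_size at its 32K
 * (2^15 byte) minimum, disjoint per-CPU ranges would allow only
 * 2^23 / 2^15 = 256 CPUs.)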
 *
 * While there are tricks that one could use to address this constraint (e.g.,
 * creating trampolines every maximum_branch_displacement bytes), we instead
 * solve this by not permitting each CPU to consume a disjoint virtual range.
 * Rather, we have each CPU's interposing trap table use the _same_ virtual
 * range, but we back the trap tables with disjoint physical memory.  Normally,
 * such one-to-many virtual-to-physical mappings are illegal; this is
 * permissible here only because the pages for the interposing trap table are
 * necessarily locked in the TLB.  (The CPUs thus never have the opportunity to
 * discover that they have conflicting translations.)
 *
 * On CMT architectures in which CPUs can share MMUs, the above trick will not
 * work: two CPUs that share an MMU cannot have the same virtual address map
 * to disjoint physical pages.  On these architectures, any CPUs sharing the
 * same MMU must consume a disjoint 32K virtual address range -- limiting the
 * number of CPUs sharing an MMU on these architectures to 256 due to the
 * branch displacement limitation described above.  On the sun4v architecture,
 * there is a further limitation: a guest may not have more than eight locked
 * TLB entries per MMU.  To allow operation under this restriction, the
 * interposing trap table and the trap statistics are each accessed through
 * a single 4M TLB entry.  This limits the footprint to two locked entries
 * (one for the I-TLB and one for the D-TLB), but further restricts the number
 * of CPUs to 128 per MMU.  However, support for more than 128 CPUs can easily
 * be added via a hybrid scheme, where the same 4M virtual address is used
 * on different MMUs.
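 *
 * (Again worked out:  dividing a single 4M (2^22 byte) mapping into 32K
 * (2^15 byte) per-CPU portions yields 2^22 / 2^15 = 128 CPUs per MMU.)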
 *
 *
 * TLB Statistics
 *
 * Because TLB misses are an important component of system performance, we wish
 * to know much more about these traps than simply the number received.
 * Specifically, we wish to know:
 *
 *  (a)	The amount of time spent executing the TLB miss handler
 *  (b)	TLB misses versus TSB misses
 *  (c) Kernel-level misses versus user-level misses
 *  (d) Misses per pagesize
 *
 * TLB Statistics: Time Spent Executing
 *
 * To accurately determine the amount of time spent executing the TLB miss
 * handler, one must get a timestamp on trap entry and trap exit, subtract the
 * former from the latter, and add the result to an accumulating count.
 * Consider flow of control during normal TLB miss processing (where "ldx
 * [%g2], %g2" is an arbitrary TLB-missing instruction):
 *
 * + - - - - - - - -+
 * :                :
 * : ldx [%g2], %g2 :<-------------------------------------------------------+
 * :                :              Return from trap:                         |
 * + - - - - - - - -+                TL <- TL - 1 (0)                        |
 *	  |                          %pc <- TSTATE[TL].TPC (address of load) |
 *	  | TLB miss:                                                        |
 *        |   TL <- TL + 1 (1)                                               |
 *        |   %pc <- TLB-miss-trap-handler                                   |
 *        |                                                                  |
 *        v                                                                  |
 * + - - - - - - - - - - - - - - - +                                         |
 * :                               :                                         |
 * : Lookup VA in TSB              :                                         |
 * : If (hit)                      :                                         |
 * :     Fill TLB                  :                                         |
 * : Else                          :                                         |
 * :     Lookup VA (hme hash table :                                         |
 * :                or segkpm)     :                                         |
 * :     Fill TLB                  :                                         |
 * : Endif                         :                                         |
 * : Issue "retry"  ---------------------------------------------------------+
 * :                               :
 * + - - - - - - - - - - - - - - - +
 *  TLB-miss-trap-handler
 *
 *
 * As the above diagram indicates, interposing on the trap table allows one
 * only to determine a timestamp on trap _entry_:  when the TLB miss handler
 * has completed filling the TLB, a "retry" will be issued, and control will
 * transfer immediately back to the missing %pc.
 *
 * To obtain a timestamp on trap exit, we must then somehow interpose between
 * the "retry" and the subsequent control transfer to the TLB-missing
 * instruction.  To do this, we _push_ a trap level.  The basic idea is to
 * spoof a TLB miss by raising TL, setting the %tpc to be within text
 * controlled by trapstat (the "TLB return entry") and branching to the
 * underlying TLB miss handler.  When the TLB miss handler issues its "retry",
 * control will transfer not to the TLB-missing instruction, but rather to the
 * TLB return entry.  This code can then obtain a timestamp, and issue its own
 * "retry" -- thereby correctly returning to the TLB-missing instruction.
 * Here is the above TLB miss flow control diagram modified to reflect
 * trapstat's operation:
 *
 * + - - - - - - - -+
 * :                :
 * : ldx [%g2], %g2 :<-------------------------------------------------------+
 * :                :             Return from trap:                          |
 * + - - - - - - - -+               TL <- TL - 1 (0)                         |
 *	  |                         %pc <- TSTATE[TL].TPC (address of load)  |
 *	  | TLB miss:                                                        |
 *        |   TL <- TL + 1 (1)                                               |
 *        |   %pc <- TLB-miss-trap-handler (trapstat)                        |
 *        |                                                                  |
 *        v                                    TLB-return-entry (trapstat)   |
 * + - - - - - - - - - - - - - - - - - - +    + - - - - - - - - - - - - - +  |
 * :                                     :    :                           :  |
 * : Record timestamp                    :    : Record timestamp          :  |
 * : TL <- 2                             :    : Take timestamp difference :  |
 * : TSTATE[1].TPC <- TLB-return-entry   :    : Add to running total      :  |
 * : ba,a TLB-miss-trap-handler -----------+  : Issue "retry"  --------------+
 * :                                     : |  :                           :
 * + - - - - - - - - - - - - - - - - - - + |  + - - - - - - - - - - - - - +
 *  TLB-miss-trap-handler	           |                  ^
 *  (trapstat)                             |                  |
 *                                         |                  |
 *                                         |                  |
 *                 +-----------------------+                  |
 *                 |                                          |
 *                 |                                          |
 *                 v                                          |
 * + - - - - - - - - - - - - - - - +                          |
 * :                               :                          |
 * : Lookup VA in TSB              :                          |
 * : If (hit)                      :                          |
 * :     Fill TLB                  :                          |
 * : Else                          :                          |
 * :     Lookup VA (hme hash table :                          |
 * :                or segkpm)     :                          |
 * :     Fill TLB                  :                          |
 * : Endif                         :                          |
 * : Issue "retry"  ------------------------------------------+
 * :                               : Return from trap:
 * + - - - - - - - - - - - - - - - +   TL <- TL - 1 (1)
 *  TLB-miss-trap-handler              %pc <- TSTATE[TL].TPC (TLB-return-entry)
 *
 *
 * A final subterfuge is required to complete our artifice:  if we miss in
 * the TLB, the TSB _and_ the subsequent hash or segkpm lookup (that is, if
 * there is no valid translation for the TLB-missing address), common system
 * software will need to accurately determine the %tpc as part of its page
 * fault handling. We therefore modify the kernel to check the %tpc in this
 * case: if the %tpc falls within the VA range controlled by trapstat and
 * the TL is 2, TL is simply lowered back to 1 (this check is implemented
 * by the TSTAT_CHECK_TL1 macro).  Lowering TL to 1 has the effect of
 * discarding the state pushed by trapstat.
 *
 * TLB Statistics: TLB Misses versus TSB Misses
 *
 * Distinguishing TLB misses from TSB misses requires further interposition
 * on the TLB miss handler:  we cannot know a priori or a posteriori if a
 * given VA will or has hit in the TSB.
 *
 * We achieve this distinction by adding a second TLB return entry almost
 * identical to the first -- differing only in the address to which it
 * stores its results.  We then modify the TLB miss handlers of the kernel
 * such that they check the %tpc when they determine that a TLB miss has
 * subsequently missed in the TSB:  if the %tpc lies within trapstat's VA
 * range and TL is 2 (that is, if trapstat is running), the TLB miss handler
 * _increments_ the %tpc by the size of the TLB return entry.  The ensuing
 * "retry" will thus transfer control to the second TLB return entry, and
 * the time spent in the handler will be accumulated in a memory location
 * specific to TSB misses.
 *
 * N.B.:  To minimize the amount of knowledge the kernel must have of trapstat,
 * we do not allow the kernel to hard-code the size of the TLB return entry.
 * Rather, the actual tsbmiss handler executes a known instruction at the
 * corresponding tsbmiss patch points (see the tstat_tsbmiss_patch_table) with
 * the %tpc in %g7:  when trapstat is not running, these points contain the
 * harmless TSTAT_TSBMISS_INSTR instruction ("add %g7, 0, %g7"). Before
 * running, trapstat modifies the instructions at these patch points such
 * that the simm13 equals the size of the TLB return entry.
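 *
 * (An illustrative sketch of the patched instruction, mirroring
 * trapstat_hotpatch() below:  with TSTAT_TSBMISS_INSTR being 0x8e01e000,
 * i.e. "add %g7, 0, %g7", the patch simply ORs the immediate into the
 * instruction word:
 *
 *	patched = TSTAT_TSBMISS_INSTR | simm13      "add %g7, simm13, %g7"
 *
 * where simm13 is the size of one TLB return entry, so the %tpc held in %g7
 * is advanced to the TSB-miss return entry before the "retry".)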
 *
 * TLB Statistics: Kernel-level Misses versus User-level Misses
 *
 * Differentiating user-level misses from kernel-level misses employs a
 * similar technique, but is simplified by the ability to distinguish a
 * user-level miss from a kernel-level miss a priori by reading the context
 * register:  we implement kernel-/user-level differentiation by again doubling
 * the number of TLB return entries, and setting the %tpc to the appropriate
 * TLB return entry in trapstat's TLB miss handler.  Together with the doubling
 * of entries required for TLB-miss/TSB-miss differentiation, this yields a
 * total of four TLB return entries:
 *
 *	Level		TSB hit?	Structure member
 *	------------------------------------------------------------
 *	Kernel		Yes		tstat_tlbret_t.ttlbr_ktlb
 *	Kernel		No		tstat_tlbret_t.ttlbr_ktsb
 *	User		Yes		tstat_tlbret_t.ttlbr_utlb
 *	User		No		tstat_tlbret_t.ttlbr_utsb
 *
 * TLB Statistics: Misses per Pagesize
 *
 * As with the TLB-/TSB-miss differentiation, we have no way of determining
 * pagesize a priori.  This is therefore implemented by mandating a new rule:
 * whenever the kernel fills the TLB in its TLB miss handler, the TTE
 * corresponding to the TLB-missing VA must be in %g5 when the handler
 * executes its "retry".  This allows the TLB return entry to determine
 * pagesize by simply looking at the pagesize field in the TTE stored in
 * %g5.
 *
 * TLB Statistics: Probe Effect
 *
 * As one might imagine, gathering TLB statistics by pushing a trap level
 * induces significant probe effect.  To account for this probe effect,
 * trapstat attempts to observe it by executing a code sequence with a known
 * number of TLB misses both before and after interposing on the trap table.
 * This allows trapstat to determine a per-trap probe effect which can then be
 * factored into the "%tim" fields of the trapstat command.
 *
 * Note that on sun4v platforms, TLB misses are normally handled by the
 * hypervisor or the hardware TSB walker, so no fast MMU miss information
 * is reported for normal operation.  However, when trapstat is invoked with
 * the -t or -T option to collect detailed TLB statistics, the kernel takes
 * over TLB miss handling.  This results in significantly more overhead, and
 * the TLB statistics may not be as accurate as on sun4u platforms.
 *
 * Locking
 *
 * The implementation uses two locks:  tstat_lock (a local lock) and the global
 * cpu_lock.  tstat_lock is used to assure trapstat's consistency in the
 * presence of multithreaded /dev/trapstat consumers (while as of this writing
 * the only consumer of /dev/trapstat is single threaded, it is obviously
 * necessary to correctly support multithreaded access).  cpu_lock is held
 * whenever CPUs are being manipulated directly, to prevent them from
 * disappearing in the process.  Because trapstat's DR callback
 * (trapstat_cpu_setup()) must grab tstat_lock and is called with cpu_lock
 * held, the lock ordering is necessarily cpu_lock before tstat_lock.
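 *
 * (Sketched, for any path that ever needs both locks:
 *
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&tstat_lock);
 *	...
 *	mutex_exit(&tstat_lock);
 *	mutex_exit(&cpu_lock);
 *
 * acquiring them in the opposite order would risk deadlock against the DR
 * callback path described above.)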
 *
 */
/* END CSTYLED */

static dev_info_t	*tstat_devi;	/* saved in xxattach() for xxinfo() */
static int		tstat_open;	/* set if driver is open */
static kmutex_t		tstat_lock;	/* serialize access */
static vmem_t		*tstat_arena;	/* arena for TLB-locked pages */
static tstat_percpu_t	*tstat_percpu;	/* per-CPU data */
static int		tstat_running;	/* set if trapstat is running */
static tstat_data_t	*tstat_buffer;	/* staging buffer for outgoing data */
static int		tstat_options;	/* bit-wise indication of options */
static int		*tstat_enabled;	/* map of enabled trap entries */
static int		tstat_tsbmiss_patched; /* tsbmiss patch flag */
static callb_id_t	tstat_cprcb;	/* CPR callback */
static char		*tstat_probe_area; /* VA range used for probe effect */
static caddr_t		tstat_probe_phys; /* physical to back above VA */
static hrtime_t		tstat_probe_time; /* time spent on probe effect */
static hrtime_t		tstat_probe_before[TSTAT_PROBE_NLAPS];
static hrtime_t		tstat_probe_after[TSTAT_PROBE_NLAPS];
static uint_t		tstat_pgszs;		/* # of kernel page sizes */
static uint_t		tstat_user_pgszs;	/* # of user page sizes */

/*
 * sizeof tstat_data_t + pgsz data for the kernel.  For simplicity's sake, when
 * we collect data, we do it based upon szc, but when we report data back to
 * userland, we have to do it based upon the userszc which may not match.
 * So, these two variables are for internal use and exported use respectively.
 */
static size_t		tstat_data_t_size;
static size_t		tstat_data_t_exported_size;

static size_t		tstat_data_pages;  /* number of pages of tstat data */
static size_t		tstat_data_size;   /* tstat data size in bytes */
static size_t		tstat_total_pages; /* #data pages + #instr pages */
static size_t		tstat_total_size;  /* tstat data size + instr size */
#ifdef sun4v
static caddr_t		tstat_va;	/* VA of memory reserved for TBA */
static pfn_t		tstat_pfn;	/* PFN of memory reserved for TBA */
#endif

/*
 * In the above block comment, see "TLB Statistics: TLB Misses versus
 * TSB Misses" for an explanation of the tsbmiss patch points.
 */
extern uint32_t		tsbmiss_trapstat_patch_point;
extern uint32_t		tsbmiss_trapstat_patch_point_kpm;
extern uint32_t		tsbmiss_trapstat_patch_point_kpm_small;

/*
 * Trapstat tsbmiss patch table
 */
tstat_tsbmiss_patch_entry_t tstat_tsbmiss_patch_table[] = {
	{(uint32_t *)&tsbmiss_trapstat_patch_point, 0},
	{(uint32_t *)&tsbmiss_trapstat_patch_point_kpm, 0},
	{(uint32_t *)&tsbmiss_trapstat_patch_point_kpm_small, 0},
	{(uint32_t *)NULL, 0}
};

/*
 * We define some general SPARC-specific constants to allow more readable
 * relocations.
 */
#define	NOP	0x01000000
#define	HI22(v) ((uint32_t)(v) >> 10)
#define	LO10(v) ((uint32_t)(v) & 0x3ff)
#define	LO12(v) ((uint32_t)(v) & 0xfff)
#define	DISP22(from, to) \
	((((uintptr_t)(to) - (uintptr_t)(from)) >> 2) & 0x3fffff)
#define	ASI(asi)	((asi) << 5)

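/*
 * For illustration only -- a sketch of how the relocation macros above are
 * meant to compose instruction words.  The opcode templates used here
 * (sethi into %g1, or into %g1, and ba,a) are assumptions of this sketch
 * and are not used by the driver itself:
 *
 *	sethi_word = 0x03000000 | HI22(addr);		sethi %hi(addr), %g1
 *	or_word    = 0x82106000 | LO10(addr);		or %g1, %lo(addr), %g1
 *	branch     = 0x30800000 | DISP22(from, to);	ba,a <to>
 */
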
/*
 * The interposing trap table must be locked in the I-TLB, and any data
 * referred to in the interposing trap handler must be locked in the D-TLB.
 * This function locks these pages in the appropriate TLBs by creating TTEs
 * from whole cloth, and manually loading them into the TLB.  This function is
 * called from cross call context.
 *
 * On sun4v platforms, we use 4M page size mappings to minimize the number
 * of locked down entries (i.e. permanent mappings). Each CPU uses a
 * reserved portion of that 4M page for its TBA and data.
 */
static void
trapstat_load_tlb(void)
{
#ifndef sun4v
	int i;
#else
	uint64_t ret;
#endif
	tte_t tte;
	tstat_percpu_t *tcpu = &tstat_percpu[CPU->cpu_id];
	caddr_t va = tcpu->tcpu_vabase;

	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
	ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED));

#ifndef sun4v
	for (i = 0; i < tstat_total_pages; i++, va += MMU_PAGESIZE) {
		tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) |
			TTE_PFN_INTHI(tcpu->tcpu_pfn[i]);
		if (i < TSTAT_INSTR_PAGES) {
			tte.tte_intlo = TTE_PFN_INTLO(tcpu->tcpu_pfn[i]) |
				TTE_LCK_INT | TTE_CP_INT | TTE_PRIV_INT;
			sfmmu_itlb_ld(va, KCONTEXT, &tte);
		} else {
			tte.tte_intlo = TTE_PFN_INTLO(tcpu->tcpu_pfn[i]) |
				TTE_LCK_INT | TTE_CP_INT | TTE_CV_INT |
				TTE_PRIV_INT | TTE_HWWR_INT;
			sfmmu_dtlb_ld(va, KCONTEXT, &tte);
		}
	}
#else /* sun4v */
	tte.tte_inthi = TTE_VALID_INT | TTE_PFN_INTHI(tstat_pfn);
	tte.tte_intlo = TTE_PFN_INTLO(tstat_pfn) | TTE_CP_INT |
		TTE_CV_INT | TTE_PRIV_INT | TTE_HWWR_INT |
		TTE_SZ_INTLO(TTE4M);
	ret = hv_mmu_map_perm_addr(va, KCONTEXT, *(uint64_t *)&tte,
		MAP_ITLB | MAP_DTLB);

	if (ret != H_EOK)
		cmn_err(CE_PANIC, "trapstat: cannot map new TBA "
		    "for cpu %d  (error: 0x%lx)", CPU->cpu_id, ret);
#endif /* sun4v */
}

/*
 * As mentioned in the "TLB Statistics: TLB Misses versus TSB Misses" section
 * of the block comment, TLB misses are differentiated from TSB misses in
 * part by hot-patching the instructions at the tsbmiss patch points (see
 * tstat_tsbmiss_patch_table). This routine is used both to initially patch
 * the instructions, and to patch them back to their original values upon
 * restoring the original trap table.
 */
static void
trapstat_hotpatch()
{
	uint32_t instr;
	uint32_t simm13;
	tstat_tsbmiss_patch_entry_t *ep;

	ASSERT(MUTEX_HELD(&tstat_lock));

	if (!(tstat_options & TSTAT_OPT_TLBDATA))
		return;

	if (!tstat_tsbmiss_patched) {
		/*
		 * We haven't patched the TSB paths; do so now.
		 */
		/*CONSTCOND*/
		ASSERT(offsetof(tstat_tlbret_t, ttlbr_ktsb) -
		    offsetof(tstat_tlbret_t, ttlbr_ktlb) ==
		    offsetof(tstat_tlbret_t, ttlbr_utsb) -
		    offsetof(tstat_tlbret_t, ttlbr_utlb));

		simm13 = offsetof(tstat_tlbret_t, ttlbr_ktsb) -
		    offsetof(tstat_tlbret_t, ttlbr_ktlb);

		for (ep = tstat_tsbmiss_patch_table; ep->tpe_addr; ep++) {
			ASSERT(ep->tpe_instr == 0);
			instr = ep->tpe_instr = *ep->tpe_addr;

			/*
			 * Assert that the instruction we're about to patch is
			 * "add %g7, 0, %g7" (0x8e01e000).
			 */
			ASSERT(instr == TSTAT_TSBMISS_INSTR);

			instr |= simm13;
			hot_patch_kernel_text((caddr_t)ep->tpe_addr,
			    instr, sizeof (instr));
		}

		tstat_tsbmiss_patched = 1;

	} else {
		/*
		 * Remove patches from the TSB paths.
		 */
		for (ep = tstat_tsbmiss_patch_table; ep->tpe_addr; ep++) {
			ASSERT(ep->tpe_instr == TSTAT_TSBMISS_INSTR);
			hot_patch_kernel_text((caddr_t)ep->tpe_addr,
			    ep->tpe_instr, sizeof (instr));
			ep->tpe_instr = 0;
		}

		tstat_tsbmiss_patched = 0;
	}
}

/*
 * This is the routine executed to clock the performance of the trap table,
 * executed both before and after interposing on the trap table to attempt to
 * determine probe effect.  The probe effect is used to adjust the "%tim"
 * fields of trapstat's -t and -T output; we only use TLB misses to clock the
 * trap table.  We execute the inner loop (which is designed to exceed the
 * TLB's reach) nlaps times, taking the best time as our time (thereby
 * factoring out the effects of interrupts, cache misses or other perturbing
 * events).
 */
static hrtime_t
trapstat_probe_laps(int nlaps, hrtime_t *buf)
{
	int i, j = 0;
	hrtime_t ts, best = INT64_MAX;

	while (nlaps--) {
		ts = rdtick();

		for (i = 0; i < TSTAT_PROBE_SIZE; i += MMU_PAGESIZE)
			*((volatile char *)&tstat_probe_area[i]);

		if ((ts = rdtick() - ts) < best)
			best = ts;
		buf[j++] = ts;
	}

	return (best);
}

/*
 * This routine determines the probe effect by calling trapstat_probe_laps()
 * both without and with the interposing trap table.  Note that this is
 * called from a cross call on the desired CPU, and that it is called on
 * every CPU (this is necessary because the probe effect may differ from
 * one CPU to another).
 */
static void
trapstat_probe()
{
	tstat_percpu_t *tcpu = &tstat_percpu[CPU->cpu_id];
	hrtime_t before, after;

	if (!(tcpu->tcpu_flags & TSTAT_CPU_SELECTED))
		return;

	if (tstat_probe_area == NULL || (tstat_options & TSTAT_OPT_NOGO))
		return;

	/*
	 * We very much expect the %tba to be KERNELBASE; this is a
	 * precautionary measure to assure that trapstat doesn't melt the
	 * machine should the %tba point unexpectedly elsewhere.
	 */
	if (get_tba() != (caddr_t)KERNELBASE)
		return;

	/*
	 * Preserve this CPU's data before destroying it by enabling the
	 * interposing trap table.  We can safely use tstat_buffer because
	 * the caller of the trapstat_probe() cross call is holding tstat_lock.
	 */
	bcopy(tcpu->tcpu_data, tstat_buffer, tstat_data_t_size);

	tstat_probe_time = gethrtime();

	before = trapstat_probe_laps(TSTAT_PROBE_NLAPS, tstat_probe_before);
	(void) set_tba(tcpu->tcpu_ibase);

	after = trapstat_probe_laps(TSTAT_PROBE_NLAPS, tstat_probe_after);
	(void) set_tba((caddr_t)KERNELBASE);

	tstat_probe_time = gethrtime() - tstat_probe_time;

	bcopy(tstat_buffer, tcpu->tcpu_data, tstat_data_t_size);
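	/*
	 * Each lap touches TSTAT_PROBE_NPAGES distinct pages, so the
	 * difference between the interposed and non-interposed best times,
	 * divided by the number of pages, approximates the added cost per
	 * TLB miss induced by trapstat.
	 */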
	tcpu->tcpu_data->tdata_peffect = (after - before) / TSTAT_PROBE_NPAGES;
}

static void
trapstat_probe_alloc()
{
	pfn_t pfn;
	caddr_t va;
	int i;

	ASSERT(MUTEX_HELD(&tstat_lock));
	ASSERT(tstat_probe_area == NULL);
	ASSERT(tstat_probe_phys == NULL);

	if (!(tstat_options & TSTAT_OPT_TLBDATA))
		return;

	/*
	 * Grab some virtual from the heap arena.
	 */
	tstat_probe_area = vmem_alloc(heap_arena, TSTAT_PROBE_SIZE, VM_SLEEP);
	va = tstat_probe_area;

	/*
	 * Grab a single physical page.
	 */
	tstat_probe_phys = vmem_alloc(tstat_arena, MMU_PAGESIZE, VM_SLEEP);
	pfn = hat_getpfnum(kas.a_hat, tstat_probe_phys);

	/*
	 * Now set the translation for every page in our virtual range
	 * to be our allocated physical page.
	 */
	for (i = 0; i < TSTAT_PROBE_NPAGES; i++) {
		hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn, PROT_READ,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
		va += MMU_PAGESIZE;
	}
}

static void
trapstat_probe_free()
{
	caddr_t va;
	int i;

	ASSERT(MUTEX_HELD(&tstat_lock));

	if ((va = tstat_probe_area) == NULL)
		return;

	for (i = 0; i < TSTAT_PROBE_NPAGES; i++) {
		hat_unload(kas.a_hat, va, MMU_PAGESIZE, HAT_UNLOAD_UNLOCK);
		va += MMU_PAGESIZE;
	}

	vmem_free(tstat_arena, tstat_probe_phys, MMU_PAGESIZE);
	vmem_free(heap_arena, tstat_probe_area, TSTAT_PROBE_SIZE);

	tstat_probe_phys = NULL;
	tstat_probe_area = NULL;
}

/*
 * This routine actually enables a CPU by setting its %tba to be the
 * CPU's interposing trap table.  It is called out of cross call context.
 */
static void
trapstat_enable()
{
	tstat_percpu_t *tcpu = &tstat_percpu[CPU->cpu_id];

	if (!(tcpu->tcpu_flags & TSTAT_CPU_SELECTED))
		return;

	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
	ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED));

	if (get_tba() != (caddr_t)KERNELBASE)
		return;

	if (!(tstat_options & TSTAT_OPT_NOGO))
		(void) set_tba(tcpu->tcpu_ibase);
	tcpu->tcpu_flags |= TSTAT_CPU_ENABLED;
#ifdef sun4v
	if (tstat_options & (TSTAT_OPT_TLBDATA | TSTAT_OPT_NOGO)) {
		/*
		 * On sun4v platforms, TLB misses are normally handled by the
		 * hypervisor or the hardware TSB walker -- provided one or
		 * more TSBs have been set up and communicated via the
		 * hv_set_ctx0 and hv_set_ctxnon0 APIs.  To collect TLB
		 * statistics, the guest kernel must handle every TLB miss
		 * itself; we therefore tell the hypervisor that there is no
		 * TSB by passing NULL TSB information for both the zero and
		 * non-zero contexts.
		 *
		 * While we withhold the kernel TSB information immediately,
		 * to avoid any locking dependency we don't touch the user
		 * TSB information here.  Rather, we simply set the
		 * TSTAT_TLB_STATS flag so that the user TSB information is
		 * likewise withheld on the next context switch.  The proper
		 * kernel/user TSB information is restored in
		 * trapstat_disable().
		 */
840*0Sstevel@tonic-gate 		cpu_t *cp = CPU;
841*0Sstevel@tonic-gate 
842*0Sstevel@tonic-gate 		cp->cpu_m.cpu_tstat_flags |= TSTAT_TLB_STATS;
843*0Sstevel@tonic-gate 		(void) hv_set_ctx0(NULL, NULL);
844*0Sstevel@tonic-gate 		(void) hv_set_ctxnon0(NULL, NULL);
845*0Sstevel@tonic-gate 	}
846*0Sstevel@tonic-gate #endif
847*0Sstevel@tonic-gate }
848*0Sstevel@tonic-gate 
849*0Sstevel@tonic-gate /*
 * This routine disables a CPU (vis-a-vis trapstat) by setting its %tba to be
851*0Sstevel@tonic-gate  * the actual, underlying trap table.  It is called out of cross call context.
852*0Sstevel@tonic-gate  */
853*0Sstevel@tonic-gate static void
854*0Sstevel@tonic-gate trapstat_disable()
855*0Sstevel@tonic-gate {
856*0Sstevel@tonic-gate 	tstat_percpu_t *tcpu = &tstat_percpu[CPU->cpu_id];
857*0Sstevel@tonic-gate 
858*0Sstevel@tonic-gate 	if (!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED))
859*0Sstevel@tonic-gate 		return;
860*0Sstevel@tonic-gate 
861*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED);
862*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
863*0Sstevel@tonic-gate 
864*0Sstevel@tonic-gate 	if (!(tstat_options & TSTAT_OPT_NOGO))
865*0Sstevel@tonic-gate 		(void) set_tba((caddr_t)KERNELBASE);
866*0Sstevel@tonic-gate 
867*0Sstevel@tonic-gate 	tcpu->tcpu_flags &= ~TSTAT_CPU_ENABLED;
868*0Sstevel@tonic-gate 
869*0Sstevel@tonic-gate #ifdef sun4v
870*0Sstevel@tonic-gate 	if (tstat_options & (TSTAT_OPT_TLBDATA | TSTAT_OPT_NOGO)) {
871*0Sstevel@tonic-gate 		/*
		 * On sun4v platforms, TLB misses are normally handled by the
		 * hypervisor or the hardware -- provided one or more TSBs
		 * have been set up and communicated via the hv_set_ctx0 and
		 * hv_set_ctxnon0 API.  As part of collecting TLB statistics,
		 * we disabled that miss processing by faking the absence of
		 * any TSB; we now need to communicate the proper kernel/user
		 * TSB information so that TLB misses can once again be
		 * handled efficiently by the hypervisor or the hardware.
		 *
		 * We restore the kernel TSB information right away.  However,
		 * to minimize any locking dependency, we don't restore the
		 * user TSB information here.  Instead, we simply clear the
		 * TSTAT_TLB_STATS flag so that the user TSB information is
		 * automatically restored on the next context switch.
		 *
		 * Note that the call to restore the kernel TSB information
		 * will normally not fail unless incorrect information is
		 * passed here.  Even in that scenario, the system will still
		 * continue to function properly, albeit with reduced
		 * performance, with the guest kernel handling all TLB misses.
893*0Sstevel@tonic-gate 		 */
894*0Sstevel@tonic-gate 		struct hv_tsb_block *hvbp = &ksfmmup->sfmmu_hvblock;
895*0Sstevel@tonic-gate 		cpu_t *cp = CPU;
896*0Sstevel@tonic-gate 
897*0Sstevel@tonic-gate 		cp->cpu_m.cpu_tstat_flags &= ~TSTAT_TLB_STATS;
898*0Sstevel@tonic-gate 		(void) hv_set_ctx0(hvbp->hv_tsb_info_cnt, hvbp->hv_tsb_info_pa);
899*0Sstevel@tonic-gate 	}
900*0Sstevel@tonic-gate #endif
901*0Sstevel@tonic-gate }
902*0Sstevel@tonic-gate 
903*0Sstevel@tonic-gate /*
904*0Sstevel@tonic-gate  * We use %tick as the time base when recording the time spent executing
905*0Sstevel@tonic-gate  * the trap handler.  %tick, however, is not necessarily kept in sync
906*0Sstevel@tonic-gate  * across CPUs (indeed, different CPUs may have different %tick frequencies).
907*0Sstevel@tonic-gate  * We therefore cross call onto a CPU to get a snapshot of its data to
908*0Sstevel@tonic-gate  * copy out; this is the routine executed out of that cross call.
909*0Sstevel@tonic-gate  */
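/*
 * A note on the snapshot timestamps:  tdata_snapts (gethrtime()) and
 * tdata_snaptick (%tick) are captured back-to-back, so a consumer of the
 * copied-out data can estimate a CPU's effective %tick frequency from two
 * successive snapshots and convert %tick-denominated quantities (e.g. the
 * tmiss_time accumulators) into wall-clock time.  Roughly:
 *
 *	freq  = (snaptick1 - snaptick0) / (snapts1 - snapts0)	[ticks/nsec]
 *	nsecs = tmiss_time / freq
 *
 * This is only a sketch of one way to reduce the data; the actual reduction
 * is left to the consumer (e.g. trapstat(1M)).
 */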
910*0Sstevel@tonic-gate static void
911*0Sstevel@tonic-gate trapstat_snapshot()
912*0Sstevel@tonic-gate {
913*0Sstevel@tonic-gate 	tstat_percpu_t *tcpu = &tstat_percpu[CPU->cpu_id];
914*0Sstevel@tonic-gate 	tstat_data_t *data = tcpu->tcpu_data;
915*0Sstevel@tonic-gate 
916*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED);
917*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
918*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ENABLED);
919*0Sstevel@tonic-gate 
920*0Sstevel@tonic-gate 	data->tdata_snapts = gethrtime();
921*0Sstevel@tonic-gate 	data->tdata_snaptick = rdtick();
922*0Sstevel@tonic-gate 	bcopy(data, tstat_buffer, tstat_data_t_size);
923*0Sstevel@tonic-gate }
924*0Sstevel@tonic-gate 
925*0Sstevel@tonic-gate /*
926*0Sstevel@tonic-gate  * The TSTAT_RETENT_* constants define offsets in the TLB return entry.
927*0Sstevel@tonic-gate  * They are used only in trapstat_tlbretent() (below) and #undef'd
928*0Sstevel@tonic-gate  * immediately afterwards.  Any change to "retent" in trapstat_tlbretent()
929*0Sstevel@tonic-gate  * will likely require changes to these constants.
930*0Sstevel@tonic-gate  */
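/*
 * Each of these constants is the index (in 32-bit instruction words) of an
 * instruction in the "retent" template below whose immediate field is
 * patched at runtime with a "|=":  the sethi instructions take the
 * high-order bits of an address (HI22()), while the ldx/stx instructions
 * take a low-order address fragment or structure offset (LO10() or a plain
 * offsetof()).  For example, patching the sethi at TSTAT_RETENT_STATHI is
 * roughly:
 *
 *	0x03000000              == sethi %hi(0), %g1		(template)
 *	0x03000000 | HI22(base) == sethi %hi(base), %g1	(patched)
 */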
931*0Sstevel@tonic-gate 
932*0Sstevel@tonic-gate #ifndef	sun4v
933*0Sstevel@tonic-gate #define	TSTAT_RETENT_STATHI	1
934*0Sstevel@tonic-gate #define	TSTAT_RETENT_STATLO	2
935*0Sstevel@tonic-gate #define	TSTAT_RETENT_SHIFT	8
936*0Sstevel@tonic-gate #define	TSTAT_RETENT_COUNT_LD	10
937*0Sstevel@tonic-gate #define	TSTAT_RETENT_COUNT_ST	12
938*0Sstevel@tonic-gate #define	TSTAT_RETENT_TMPTSHI	13
939*0Sstevel@tonic-gate #define	TSTAT_RETENT_TMPTSLO	14
940*0Sstevel@tonic-gate #define	TSTAT_RETENT_TIME_LD	16
941*0Sstevel@tonic-gate #define	TSTAT_RETENT_TIME_ST	18
942*0Sstevel@tonic-gate #else /* sun4v */
943*0Sstevel@tonic-gate #define	TSTAT_RETENT_STATHI	1
944*0Sstevel@tonic-gate #define	TSTAT_RETENT_STATLO	2
945*0Sstevel@tonic-gate #define	TSTAT_RETENT_SHIFT	5
946*0Sstevel@tonic-gate #define	TSTAT_RETENT_COUNT_LD	7
947*0Sstevel@tonic-gate #define	TSTAT_RETENT_COUNT_ST	9
948*0Sstevel@tonic-gate #define	TSTAT_RETENT_TMPTSHI	10
949*0Sstevel@tonic-gate #define	TSTAT_RETENT_TMPTSLO	11
950*0Sstevel@tonic-gate #define	TSTAT_RETENT_TIME_LD	13
951*0Sstevel@tonic-gate #define	TSTAT_RETENT_TIME_ST	15
952*0Sstevel@tonic-gate #endif /* sun4v */
953*0Sstevel@tonic-gate 
954*0Sstevel@tonic-gate static void
955*0Sstevel@tonic-gate trapstat_tlbretent(tstat_percpu_t *tcpu, tstat_tlbretent_t *ret,
956*0Sstevel@tonic-gate     tstat_missdata_t *data)
957*0Sstevel@tonic-gate {
958*0Sstevel@tonic-gate 	uint32_t *ent = ret->ttlbrent_instr, shift;
959*0Sstevel@tonic-gate 	uintptr_t base, tmptick = TSTAT_DATA_OFFS(tcpu, tdata_tmptick);
960*0Sstevel@tonic-gate 
961*0Sstevel@tonic-gate 	/*
962*0Sstevel@tonic-gate 	 * This is the entry executed upon return from the TLB/TSB miss
	 * handler (i.e., the code interposed between the "retry" and
964*0Sstevel@tonic-gate 	 * the actual return to the TLB-missing instruction).  Detail on its
965*0Sstevel@tonic-gate 	 * theory of operation can be found in the "TLB Statistics" section
966*0Sstevel@tonic-gate 	 * of the block comment.  Note that we expect the TTE just loaded
967*0Sstevel@tonic-gate 	 * into the TLB to be in %g5; all other globals are available as
968*0Sstevel@tonic-gate 	 * scratch.  Finally, note that the page size information in sun4v is
969*0Sstevel@tonic-gate 	 * located in the lower bits of the TTE -- requiring us to have a
970*0Sstevel@tonic-gate 	 * different return entry on sun4v.
971*0Sstevel@tonic-gate 	 */
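	/*
	 * Reading the template below:  the first several instructions derive
	 * a page size index from the TTE in %g5 (on sun4u by gathering the
	 * size bits scattered through the TTE; on sun4v by masking off all
	 * but the low bits), scale it by sizeof (tstat_pgszdata_t) via the
	 * patched shift, and add it to the patched statistics base -- so the
	 * loads and stores that follow update the tmiss_count and tmiss_time
	 * fields of the tdata_pgsz[] element for the page size that missed.
	 */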
972*0Sstevel@tonic-gate 	static const uint32_t retent[TSTAT_TLBRET_NINSTR] = {
973*0Sstevel@tonic-gate #ifndef sun4v
974*0Sstevel@tonic-gate 	    0x87410000,		/* rd    %tick, %g3			*/
975*0Sstevel@tonic-gate 	    0x03000000, 	/* sethi %hi(stat), %g1			*/
976*0Sstevel@tonic-gate 	    0x82106000,		/* or    %g1, %lo(stat), %g1		*/
977*0Sstevel@tonic-gate 	    0x89297001,		/* sllx  %g5, 1, %g4			*/
978*0Sstevel@tonic-gate 	    0x8931303e,		/* srlx  %g4, 62, %g4			*/
979*0Sstevel@tonic-gate 	    0x8531702e,		/* srlx  %g5, 46, %g2			*/
980*0Sstevel@tonic-gate 	    0x8408a004,		/* and   %g2, 4, %g2			*/
981*0Sstevel@tonic-gate 	    0x88110002,		/* or    %g4, %g2, %g4			*/
982*0Sstevel@tonic-gate 	    0x89292000,		/* sll   %g4, shift, %g4		*/
983*0Sstevel@tonic-gate 	    0x82004004,		/* add   %g1, %g4, %g1			*/
984*0Sstevel@tonic-gate 	    0xc4586000,		/* ldx   [%g1 + tmiss_count], %g2	*/
985*0Sstevel@tonic-gate 	    0x8400a001,		/* add   %g2, 1, %g2			*/
986*0Sstevel@tonic-gate 	    0xc4706000,		/* stx   %g2, [%g1 + tmiss_count]	*/
987*0Sstevel@tonic-gate 	    0x0d000000, 	/* sethi %hi(tdata_tmptick), %g6	*/
988*0Sstevel@tonic-gate 	    0xc459a000, 	/* ldx   [%g6 + %lo(tdata_tmptick)], %g2 */
989*0Sstevel@tonic-gate 	    0x8620c002,		/* sub   %g3, %g2, %g3			*/
990*0Sstevel@tonic-gate 	    0xc4586000,		/* ldx   [%g1 + tmiss_time], %g2	*/
991*0Sstevel@tonic-gate 	    0x84008003,		/* add   %g2, %g3, %g2			*/
992*0Sstevel@tonic-gate 	    0xc4706000,		/* stx   %g2, [%g1 + tmiss_time]	*/
993*0Sstevel@tonic-gate 	    0x83f00000		/* retry				*/
994*0Sstevel@tonic-gate #else /* sun4v */
995*0Sstevel@tonic-gate 	    0x87410000,		/* rd    %tick, %g3			*/
996*0Sstevel@tonic-gate 	    0x03000000, 	/* sethi %hi(stat), %g1			*/
997*0Sstevel@tonic-gate 	    0x82106000,		/* or    %g1, %lo(stat), %g1		*/
998*0Sstevel@tonic-gate 	    0x8929703d,		/* sllx  %g5, 61, %g4			*/
999*0Sstevel@tonic-gate 	    0x8931303d,		/* srlx  %g4, 61, %g4			*/
1000*0Sstevel@tonic-gate 	    0x89292000,		/* sll   %g4, shift, %g4		*/
1001*0Sstevel@tonic-gate 	    0x82004004,		/* add   %g1, %g4, %g1			*/
1002*0Sstevel@tonic-gate 	    0xc4586000,		/* ldx   [%g1 + tmiss_count], %g2	*/
1003*0Sstevel@tonic-gate 	    0x8400a001,		/* add   %g2, 1, %g2			*/
1004*0Sstevel@tonic-gate 	    0xc4706000,		/* stx   %g2, [%g1 + tmiss_count]	*/
1005*0Sstevel@tonic-gate 	    0x0d000000, 	/* sethi %hi(tdata_tmptick), %g6	*/
1006*0Sstevel@tonic-gate 	    0xc459a000, 	/* ldx   [%g6 + %lo(tdata_tmptick)], %g2 */
1007*0Sstevel@tonic-gate 	    0x8620c002,		/* sub   %g3, %g2, %g3			*/
1008*0Sstevel@tonic-gate 	    0xc4586000,		/* ldx   [%g1 + tmiss_time], %g2	*/
1009*0Sstevel@tonic-gate 	    0x84008003,		/* add   %g2, %g3, %g2			*/
1010*0Sstevel@tonic-gate 	    0xc4706000,		/* stx   %g2, [%g1 + tmiss_time]	*/
1011*0Sstevel@tonic-gate 	    0x83f00000		/* retry				*/
1012*0Sstevel@tonic-gate #endif /* sun4v */
1013*0Sstevel@tonic-gate 	};
1014*0Sstevel@tonic-gate 
1015*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&tstat_lock));
1016*0Sstevel@tonic-gate 	/*CONSTCOND*/
1017*0Sstevel@tonic-gate 	ASSERT(offsetof(tstat_missdata_t, tmiss_count) <= LO10(-1));
1018*0Sstevel@tonic-gate 	/*CONSTCOND*/
1019*0Sstevel@tonic-gate 	ASSERT(offsetof(tstat_missdata_t, tmiss_time) <= LO10(-1));
1020*0Sstevel@tonic-gate 	/*CONSTCOND*/
1021*0Sstevel@tonic-gate 	ASSERT(!((sizeof (tstat_pgszdata_t) - 1) & sizeof (tstat_pgszdata_t)));
1022*0Sstevel@tonic-gate 
1023*0Sstevel@tonic-gate 	for (shift = 1; (1 << shift) != sizeof (tstat_pgszdata_t); shift++)
1024*0Sstevel@tonic-gate 		continue;
1025*0Sstevel@tonic-gate 
1026*0Sstevel@tonic-gate 	base = (uintptr_t)tcpu->tcpu_dbase +
1027*0Sstevel@tonic-gate 	    ((uintptr_t)data - (uintptr_t)tcpu->tcpu_data);
1028*0Sstevel@tonic-gate 
1029*0Sstevel@tonic-gate 	bcopy(retent, ent, sizeof (retent));
1030*0Sstevel@tonic-gate 
1031*0Sstevel@tonic-gate 	ent[TSTAT_RETENT_STATHI] |= HI22(base);
1032*0Sstevel@tonic-gate 	ent[TSTAT_RETENT_STATLO] |= LO10(base);
1033*0Sstevel@tonic-gate 	ent[TSTAT_RETENT_SHIFT] |= shift;
1034*0Sstevel@tonic-gate 	/* LINTED E_EXPR_NULL_EFFECT */
1035*0Sstevel@tonic-gate 	ent[TSTAT_RETENT_COUNT_LD] |= offsetof(tstat_missdata_t, tmiss_count);
1036*0Sstevel@tonic-gate 	/* LINTED E_EXPR_NULL_EFFECT */
1037*0Sstevel@tonic-gate 	ent[TSTAT_RETENT_COUNT_ST] |= offsetof(tstat_missdata_t, tmiss_count);
1038*0Sstevel@tonic-gate 	ent[TSTAT_RETENT_TMPTSHI] |= HI22(tmptick);
1039*0Sstevel@tonic-gate 	ent[TSTAT_RETENT_TMPTSLO] |= LO10(tmptick);
1040*0Sstevel@tonic-gate 	ent[TSTAT_RETENT_TIME_LD] |= offsetof(tstat_missdata_t, tmiss_time);
1041*0Sstevel@tonic-gate 	ent[TSTAT_RETENT_TIME_ST] |= offsetof(tstat_missdata_t, tmiss_time);
1042*0Sstevel@tonic-gate }
1043*0Sstevel@tonic-gate 
1044*0Sstevel@tonic-gate #undef TSTAT_RETENT_STATHI
1045*0Sstevel@tonic-gate #undef TSTAT_RETENT_STATLO
1046*0Sstevel@tonic-gate #undef TSTAT_RETENT_SHIFT
1047*0Sstevel@tonic-gate #undef TSTAT_RETENT_COUNT_LD
1048*0Sstevel@tonic-gate #undef TSTAT_RETENT_COUNT_ST
1049*0Sstevel@tonic-gate #undef TSTAT_RETENT_TMPTSHI
1050*0Sstevel@tonic-gate #undef TSTAT_RETENT_TMPTSLO
1051*0Sstevel@tonic-gate #undef TSTAT_RETENT_TIME_LD
1052*0Sstevel@tonic-gate #undef TSTAT_RETENT_TIME_ST
1053*0Sstevel@tonic-gate 
1054*0Sstevel@tonic-gate /*
1055*0Sstevel@tonic-gate  * The TSTAT_TLBENT_* constants define offsets in the TLB entry.  They are
1056*0Sstevel@tonic-gate  * used only in trapstat_tlbent() (below) and #undef'd immediately afterwards.
1057*0Sstevel@tonic-gate  * Any change to "tlbent" in trapstat_tlbent() will likely require changes
1058*0Sstevel@tonic-gate  * to these constants.
1059*0Sstevel@tonic-gate  */
1060*0Sstevel@tonic-gate 
1061*0Sstevel@tonic-gate #ifndef sun4v
1062*0Sstevel@tonic-gate #define	TSTAT_TLBENT_STATHI	0
1063*0Sstevel@tonic-gate #define	TSTAT_TLBENT_STATLO_LD	1
1064*0Sstevel@tonic-gate #define	TSTAT_TLBENT_STATLO_ST	3
1065*0Sstevel@tonic-gate #define	TSTAT_TLBENT_MMUASI	15
1066*0Sstevel@tonic-gate #define	TSTAT_TLBENT_TPCHI	18
1067*0Sstevel@tonic-gate #define	TSTAT_TLBENT_TPCLO_USER	19
1068*0Sstevel@tonic-gate #define	TSTAT_TLBENT_TPCLO_KERN	21
1069*0Sstevel@tonic-gate #define	TSTAT_TLBENT_TSHI	25
1070*0Sstevel@tonic-gate #define	TSTAT_TLBENT_TSLO	27
1071*0Sstevel@tonic-gate #define	TSTAT_TLBENT_BA		28
1072*0Sstevel@tonic-gate #else /* sun4v */
1073*0Sstevel@tonic-gate #define	TSTAT_TLBENT_STATHI	0
1074*0Sstevel@tonic-gate #define	TSTAT_TLBENT_STATLO_LD	1
1075*0Sstevel@tonic-gate #define	TSTAT_TLBENT_STATLO_ST	3
1076*0Sstevel@tonic-gate #define	TSTAT_TLBENT_TAGTARGET	19
1077*0Sstevel@tonic-gate #define	TSTAT_TLBENT_TPCHI	21
1078*0Sstevel@tonic-gate #define	TSTAT_TLBENT_TPCLO_USER	22
1079*0Sstevel@tonic-gate #define	TSTAT_TLBENT_TPCLO_KERN	24
1080*0Sstevel@tonic-gate #define	TSTAT_TLBENT_TSHI	28
1081*0Sstevel@tonic-gate #define	TSTAT_TLBENT_TSLO	30
1082*0Sstevel@tonic-gate #define	TSTAT_TLBENT_BA		31
1083*0Sstevel@tonic-gate #endif /* sun4v */
1084*0Sstevel@tonic-gate 
1085*0Sstevel@tonic-gate static void
1086*0Sstevel@tonic-gate trapstat_tlbent(tstat_percpu_t *tcpu, int entno)
1087*0Sstevel@tonic-gate {
1088*0Sstevel@tonic-gate 	uint32_t *ent;
1089*0Sstevel@tonic-gate 	uintptr_t orig, va, baoffs;
1090*0Sstevel@tonic-gate 	int itlb = entno == TSTAT_ENT_ITLBMISS;
1091*0Sstevel@tonic-gate 	int entoffs = entno << TSTAT_ENT_SHIFT;
1092*0Sstevel@tonic-gate 	uintptr_t tmptick, stat, tpc, utpc;
1093*0Sstevel@tonic-gate 	tstat_pgszdata_t *data = &tcpu->tcpu_data->tdata_pgsz[0];
1094*0Sstevel@tonic-gate 	tstat_tlbdata_t *udata, *kdata;
1095*0Sstevel@tonic-gate 	tstat_tlbret_t *ret;
1096*0Sstevel@tonic-gate #ifndef sun4v
1097*0Sstevel@tonic-gate 	uint32_t asi = itlb ? ASI(ASI_IMMU) : ASI(ASI_DMMU);
1098*0Sstevel@tonic-gate #else
1099*0Sstevel@tonic-gate 	uint32_t tagtarget_off = itlb ? MMFSA_I_CTX : MMFSA_D_CTX;
1100*0Sstevel@tonic-gate #endif
1101*0Sstevel@tonic-gate 
1102*0Sstevel@tonic-gate 	/*
1103*0Sstevel@tonic-gate 	 * When trapstat is run with TLB statistics, this is the entry for
1104*0Sstevel@tonic-gate 	 * both I- and D-TLB misses; this code performs trap level pushing,
1105*0Sstevel@tonic-gate 	 * as described in the "TLB Statistics" section of the block comment.
1106*0Sstevel@tonic-gate 	 * This code is executing at TL 1; %tstate[0] contains the saved
1107*0Sstevel@tonic-gate 	 * state at the time of the TLB miss.  Pushing trap level 1 (and thus
1108*0Sstevel@tonic-gate 	 * raising TL to 2) requires us to fill in %tstate[1] with our %pstate,
1109*0Sstevel@tonic-gate 	 * %cwp and %asi.  We leave %tt unchanged, and we set %tpc and %tnpc to
1110*0Sstevel@tonic-gate 	 * the appropriate TLB return entry (based on the context of the miss).
1111*0Sstevel@tonic-gate 	 * Finally, we sample %tick, and stash it in the tdata_tmptick member
1112*0Sstevel@tonic-gate 	 * the per-CPU tstat_data structure.  tdata_tmptick will be used in
	 * of the per-CPU tstat_data structure.  tdata_tmptick will be used in
1114*0Sstevel@tonic-gate 	 * TLB miss handler.
1115*0Sstevel@tonic-gate 	 *
1116*0Sstevel@tonic-gate 	 * Note that on sun4v platforms, we must also force the %gl value to 1
1117*0Sstevel@tonic-gate 	 * in %tstate and we must obtain the context information from the MMU
1118*0Sstevel@tonic-gate 	 * fault status area. (The base address of this MMU fault status area
1119*0Sstevel@tonic-gate 	 * is kept in the scratchpad register 0.)
1120*0Sstevel@tonic-gate 	 */
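	/*
	 * The %tstate image is assembled by hand in the template below:
	 * %cwp occupies the low bits, %pstate is shifted into place with
	 * "sllx ..., 8" and %asi with "sllx ..., 24".  On sun4v, the extra
	 * "sethi %hi(0x10000), %g1" and "or" set what becomes bit 40 after
	 * the shift by 24 -- the %gl field -- which is how %gl is forced
	 * to 1.
	 */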
1121*0Sstevel@tonic-gate 	static const uint32_t tlbent[] = {
1122*0Sstevel@tonic-gate #ifndef sun4v
1123*0Sstevel@tonic-gate 	    0x03000000, 		/* sethi %hi(stat), %g1		*/
1124*0Sstevel@tonic-gate 	    0xc4586000,			/* ldx   [%g1 + %lo(stat)], %g2	*/
1125*0Sstevel@tonic-gate 	    0x8400a001,			/* add   %g2, 1, %g2		*/
1126*0Sstevel@tonic-gate 	    0xc4706000,			/* stx   %g2, [%g1 + %lo(stat)]	*/
1127*0Sstevel@tonic-gate 	    0x85524000,			/* rdpr  %cwp, %g2		*/
1128*0Sstevel@tonic-gate 	    0x87518000,			/* rdpr  %pstate, %g3		*/
1129*0Sstevel@tonic-gate 	    0x8728f008,			/* sllx  %g3, 8, %g3		*/
1130*0Sstevel@tonic-gate 	    0x84108003,			/* or    %g2, %g3, %g2		*/
1131*0Sstevel@tonic-gate 	    0x8740c000,			/* rd    %asi, %g3		*/
1132*0Sstevel@tonic-gate 	    0x8728f018,			/* sllx  %g3, 24, %g3		*/
1133*0Sstevel@tonic-gate 	    0x84108003,			/* or    %g2, %g3, %g2		*/
1134*0Sstevel@tonic-gate 	    0x8350c000,			/* rdpr  %tt, %g1		*/
1135*0Sstevel@tonic-gate 	    0x8f902002,			/* wrpr  %g0, 2, %tl		*/
1136*0Sstevel@tonic-gate 	    0x85908000,			/* wrpr  %g2, %g0, %tstate	*/
1137*0Sstevel@tonic-gate 	    0x87904000,			/* wrpr  %g1, %g0, %tt		*/
1138*0Sstevel@tonic-gate 	    0xc2d80000,			/* ldxa  [%g0]ASI_MMU, %g1	*/
1139*0Sstevel@tonic-gate 	    0x83307030,			/* srlx  %g1, CTXSHIFT, %g1	*/
1140*0Sstevel@tonic-gate 	    0x02c04004,			/* brz,pn %g1, .+0x10		*/
1141*0Sstevel@tonic-gate 	    0x03000000, 		/* sethi %hi(new_tpc), %g1	*/
1142*0Sstevel@tonic-gate 	    0x82106000,			/* or    %g1, %lo(new_tpc), %g1	*/
1143*0Sstevel@tonic-gate 	    0x30800002,			/* ba,a  .+0x8			*/
1144*0Sstevel@tonic-gate 	    0x82106000,			/* or    %g1, %lo(new_tpc), %g1	*/
1145*0Sstevel@tonic-gate 	    0x81904000,			/* wrpr  %g1, %g0, %tpc		*/
1146*0Sstevel@tonic-gate 	    0x82006004,			/* add   %g1, 4, %g1		*/
1147*0Sstevel@tonic-gate 	    0x83904000,			/* wrpr  %g1, %g0, %tnpc	*/
1148*0Sstevel@tonic-gate 	    0x03000000, 		/* sethi %hi(tmptick), %g1	*/
1149*0Sstevel@tonic-gate 	    0x85410000,			/* rd    %tick, %g2		*/
1150*0Sstevel@tonic-gate 	    0xc4706000,			/* stx   %g2, [%g1 + %lo(tmptick)] */
1151*0Sstevel@tonic-gate 	    0x30800000,			/* ba,a  addr			*/
1152*0Sstevel@tonic-gate 	    NOP, NOP, NOP
1153*0Sstevel@tonic-gate #else /* sun4v */
1154*0Sstevel@tonic-gate 	    0x03000000, 		/* sethi %hi(stat), %g1		*/
1155*0Sstevel@tonic-gate 	    0xc4586000,			/* ldx   [%g1 + %lo(stat)], %g2	*/
1156*0Sstevel@tonic-gate 	    0x8400a001,			/* add   %g2, 1, %g2		*/
1157*0Sstevel@tonic-gate 	    0xc4706000,			/* stx   %g2, [%g1 + %lo(stat)]	*/
1158*0Sstevel@tonic-gate 	    0x85524000,			/* rdpr  %cwp, %g2		*/
1159*0Sstevel@tonic-gate 	    0x87518000,			/* rdpr  %pstate, %g3		*/
1160*0Sstevel@tonic-gate 	    0x8728f008,			/* sllx  %g3, 8, %g3		*/
1161*0Sstevel@tonic-gate 	    0x84108003,			/* or    %g2, %g3, %g2		*/
1162*0Sstevel@tonic-gate 	    0x8740c000,			/* rd    %asi, %g3		*/
1163*0Sstevel@tonic-gate 	    0x03000040,			/* sethi %hi(0x10000), %g1	*/
1164*0Sstevel@tonic-gate 	    0x86104003,			/* or    %g1, %g3, %g3		*/
1165*0Sstevel@tonic-gate 	    0x8728f018,			/* sllx  %g3, 24, %g3		*/
1166*0Sstevel@tonic-gate 	    0x84108003,			/* or    %g2, %g3, %g2		*/
1167*0Sstevel@tonic-gate 	    0x8350c000,			/* rdpr  %tt, %g1		*/
1168*0Sstevel@tonic-gate 	    0x8f902002,			/* wrpr  %g0, 2, %tl		*/
1169*0Sstevel@tonic-gate 	    0x85908000,			/* wrpr  %g2, %g0, %tstate	*/
1170*0Sstevel@tonic-gate 	    0x87904000,			/* wrpr  %g1, %g0, %tt		*/
1171*0Sstevel@tonic-gate 	    0xa1902001,			/* wrpr  %g0, 1, %gl		*/
1172*0Sstevel@tonic-gate 	    0xc2d80400,			/* ldxa  [%g0]ASI_SCRATCHPAD, %g1 */
1173*0Sstevel@tonic-gate 	    0xc2586000,			/* ldx  [%g1 + MMFSA_?_CTX], %g1 */
1174*0Sstevel@tonic-gate 	    0x02c04004,			/* brz,pn %g1, .+0x10		*/
1175*0Sstevel@tonic-gate 	    0x03000000, 		/* sethi %hi(new_tpc), %g1	*/
1176*0Sstevel@tonic-gate 	    0x82106000,			/* or    %g1, %lo(new_tpc), %g1	*/
1177*0Sstevel@tonic-gate 	    0x30800002,			/* ba,a  .+0x8			*/
1178*0Sstevel@tonic-gate 	    0x82106000,			/* or    %g1, %lo(new_tpc), %g1	*/
1179*0Sstevel@tonic-gate 	    0x81904000,			/* wrpr  %g1, %g0, %tpc		*/
1180*0Sstevel@tonic-gate 	    0x82006004,			/* add   %g1, 4, %g1		*/
1181*0Sstevel@tonic-gate 	    0x83904000,			/* wrpr  %g1, %g0, %tnpc	*/
1182*0Sstevel@tonic-gate 	    0x03000000, 		/* sethi %hi(tmptick), %g1	*/
1183*0Sstevel@tonic-gate 	    0x85410000,			/* rd    %tick, %g2		*/
1184*0Sstevel@tonic-gate 	    0xc4706000,			/* stx   %g2, [%g1 + %lo(tmptick)] */
1185*0Sstevel@tonic-gate 	    0x30800000			/* ba,a  addr			*/
1186*0Sstevel@tonic-gate #endif /* sun4v */
1187*0Sstevel@tonic-gate 	};
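	/*
	 * Two details of the template above that are easy to miss:  the
	 * "brz,pn %g1" tests the context number of the miss, so a context
	 * zero (kernel) miss branches to the "or" supplying %lo() of the
	 * kernel TLB return entry while a nonzero context falls through to
	 * the "or" for the user return entry; the two share one
	 * "sethi %hi()", which is why HI22(tpc) must equal HI22(utpc) below.
	 * The trailing "ba,a addr" is patched with the displacement from its
	 * own location to the corresponding entry of the actual trap table
	 * at KERNELBASE, where the real TLB miss handling then proceeds.
	 */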
1188*0Sstevel@tonic-gate 
1189*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&tstat_lock));
1190*0Sstevel@tonic-gate 	ASSERT(entno == TSTAT_ENT_ITLBMISS || entno == TSTAT_ENT_DTLBMISS);
1191*0Sstevel@tonic-gate 
1192*0Sstevel@tonic-gate 	stat = TSTAT_DATA_OFFS(tcpu, tdata_traps) + entoffs;
1193*0Sstevel@tonic-gate 	tmptick = TSTAT_DATA_OFFS(tcpu, tdata_tmptick);
1194*0Sstevel@tonic-gate 
1195*0Sstevel@tonic-gate 	if (itlb) {
1196*0Sstevel@tonic-gate 		ret = &tcpu->tcpu_instr->tinst_itlbret;
1197*0Sstevel@tonic-gate 		udata = &data->tpgsz_user.tmode_itlb;
1198*0Sstevel@tonic-gate 		kdata = &data->tpgsz_kernel.tmode_itlb;
1199*0Sstevel@tonic-gate 		tpc = TSTAT_INSTR_OFFS(tcpu, tinst_itlbret.ttlbr_ktlb);
1200*0Sstevel@tonic-gate 	} else {
1201*0Sstevel@tonic-gate 		ret = &tcpu->tcpu_instr->tinst_dtlbret;
1202*0Sstevel@tonic-gate 		udata = &data->tpgsz_user.tmode_dtlb;
1203*0Sstevel@tonic-gate 		kdata = &data->tpgsz_kernel.tmode_dtlb;
1204*0Sstevel@tonic-gate 		tpc = TSTAT_INSTR_OFFS(tcpu, tinst_dtlbret.ttlbr_ktlb);
1205*0Sstevel@tonic-gate 	}
1206*0Sstevel@tonic-gate 
1207*0Sstevel@tonic-gate 	utpc = tpc + offsetof(tstat_tlbret_t, ttlbr_utlb) -
1208*0Sstevel@tonic-gate 	    offsetof(tstat_tlbret_t, ttlbr_ktlb);
1209*0Sstevel@tonic-gate 
1210*0Sstevel@tonic-gate 	ASSERT(HI22(tpc) == HI22(utpc));
1211*0Sstevel@tonic-gate 
1212*0Sstevel@tonic-gate 	ent = (uint32_t *)((uintptr_t)tcpu->tcpu_instr + entoffs);
1213*0Sstevel@tonic-gate 	orig = KERNELBASE + entoffs;
1214*0Sstevel@tonic-gate 	va = (uintptr_t)tcpu->tcpu_ibase + entoffs;
1215*0Sstevel@tonic-gate 	baoffs = TSTAT_TLBENT_BA * sizeof (uint32_t);
1216*0Sstevel@tonic-gate 
1217*0Sstevel@tonic-gate 	bcopy(tlbent, ent, sizeof (tlbent));
1218*0Sstevel@tonic-gate 
1219*0Sstevel@tonic-gate 	ent[TSTAT_TLBENT_STATHI] |= HI22(stat);
1220*0Sstevel@tonic-gate 	ent[TSTAT_TLBENT_STATLO_LD] |= LO10(stat);
1221*0Sstevel@tonic-gate 	ent[TSTAT_TLBENT_STATLO_ST] |= LO10(stat);
1222*0Sstevel@tonic-gate #ifndef sun4v
1223*0Sstevel@tonic-gate 	ent[TSTAT_TLBENT_MMUASI] |= asi;
1224*0Sstevel@tonic-gate #else
1225*0Sstevel@tonic-gate 	ent[TSTAT_TLBENT_TAGTARGET] |= tagtarget_off;
1226*0Sstevel@tonic-gate #endif
1227*0Sstevel@tonic-gate 	ent[TSTAT_TLBENT_TPCHI] |= HI22(tpc);
1228*0Sstevel@tonic-gate 	ent[TSTAT_TLBENT_TPCLO_USER] |= LO10(utpc);
1229*0Sstevel@tonic-gate 	ent[TSTAT_TLBENT_TPCLO_KERN] |= LO10(tpc);
1230*0Sstevel@tonic-gate 	ent[TSTAT_TLBENT_TSHI] |= HI22(tmptick);
1231*0Sstevel@tonic-gate 	ent[TSTAT_TLBENT_TSLO] |= LO10(tmptick);
1232*0Sstevel@tonic-gate 	ent[TSTAT_TLBENT_BA] |= DISP22(va + baoffs, orig);
1233*0Sstevel@tonic-gate 
1234*0Sstevel@tonic-gate 	/*
1235*0Sstevel@tonic-gate 	 * And now set up the TLB return entries.
1236*0Sstevel@tonic-gate 	 */
1237*0Sstevel@tonic-gate 	trapstat_tlbretent(tcpu, &ret->ttlbr_ktlb, &kdata->ttlb_tlb);
1238*0Sstevel@tonic-gate 	trapstat_tlbretent(tcpu, &ret->ttlbr_ktsb, &kdata->ttlb_tsb);
1239*0Sstevel@tonic-gate 	trapstat_tlbretent(tcpu, &ret->ttlbr_utlb, &udata->ttlb_tlb);
1240*0Sstevel@tonic-gate 	trapstat_tlbretent(tcpu, &ret->ttlbr_utsb, &udata->ttlb_tsb);
1241*0Sstevel@tonic-gate }
1242*0Sstevel@tonic-gate 
1243*0Sstevel@tonic-gate #undef TSTAT_TLBENT_STATHI
1244*0Sstevel@tonic-gate #undef TSTAT_TLBENT_STATLO_LD
1245*0Sstevel@tonic-gate #undef TSTAT_TLBENT_STATLO_ST
1246*0Sstevel@tonic-gate #ifndef sun4v
1247*0Sstevel@tonic-gate #undef TSTAT_TLBENT_MMUASI
1248*0Sstevel@tonic-gate #else
1249*0Sstevel@tonic-gate #undef TSTAT_TLBENT_TAGTARGET
1250*0Sstevel@tonic-gate #endif
1251*0Sstevel@tonic-gate #undef TSTAT_TLBENT_TPCHI
1252*0Sstevel@tonic-gate #undef TSTAT_TLBENT_TPCLO_USER
1253*0Sstevel@tonic-gate #undef TSTAT_TLBENT_TPCLO_KERN
1254*0Sstevel@tonic-gate #undef TSTAT_TLBENT_TSHI
1255*0Sstevel@tonic-gate #undef TSTAT_TLBENT_TSLO
1256*0Sstevel@tonic-gate #undef TSTAT_TLBENT_BA
1257*0Sstevel@tonic-gate 
1258*0Sstevel@tonic-gate /*
1259*0Sstevel@tonic-gate  * The TSTAT_ENABLED_* constants define offsets in the enabled entry; the
1260*0Sstevel@tonic-gate  * TSTAT_DISABLED_BA constant defines an offset in the disabled entry.  Both
1261*0Sstevel@tonic-gate  * sets of constants are used only in trapstat_make_traptab() (below) and
1262*0Sstevel@tonic-gate  * #undef'd immediately afterwards.  Any change to "enabled" or "disabled"
1263*0Sstevel@tonic-gate  * in trapstat_make_traptab() will likely require changes to these constants.
1264*0Sstevel@tonic-gate  */
1265*0Sstevel@tonic-gate #define	TSTAT_ENABLED_STATHI	0
1266*0Sstevel@tonic-gate #define	TSTAT_ENABLED_STATLO_LD	1
1267*0Sstevel@tonic-gate #define	TSTAT_ENABLED_STATLO_ST 3
1268*0Sstevel@tonic-gate #define	TSTAT_ENABLED_BA	4
1269*0Sstevel@tonic-gate #define	TSTAT_DISABLED_BA	0
1270*0Sstevel@tonic-gate 
1271*0Sstevel@tonic-gate static void
1272*0Sstevel@tonic-gate trapstat_make_traptab(tstat_percpu_t *tcpu)
1273*0Sstevel@tonic-gate {
1274*0Sstevel@tonic-gate 	uint32_t *ent;
1275*0Sstevel@tonic-gate 	uint64_t *stat;
1276*0Sstevel@tonic-gate 	uintptr_t orig, va, en_baoffs, dis_baoffs;
1277*0Sstevel@tonic-gate 	int nent;
1278*0Sstevel@tonic-gate 
1279*0Sstevel@tonic-gate 	/*
1280*0Sstevel@tonic-gate 	 * This is the entry in the interposing trap table for enabled trap
1281*0Sstevel@tonic-gate 	 * table entries.  It loads a counter, increments it and stores it
1282*0Sstevel@tonic-gate 	 * back before branching to the actual trap table entry.
1283*0Sstevel@tonic-gate 	 */
1284*0Sstevel@tonic-gate 	static const uint32_t enabled[TSTAT_ENT_NINSTR] = {
1285*0Sstevel@tonic-gate 	    0x03000000, 		/* sethi %hi(stat), %g1		*/
1286*0Sstevel@tonic-gate 	    0xc4586000,			/* ldx   [%g1 + %lo(stat)], %g2	*/
1287*0Sstevel@tonic-gate 	    0x8400a001,			/* add   %g2, 1, %g2		*/
1288*0Sstevel@tonic-gate 	    0xc4706000,			/* stx   %g2, [%g1 + %lo(stat)]	*/
1289*0Sstevel@tonic-gate 	    0x30800000,			/* ba,a addr			*/
1290*0Sstevel@tonic-gate 	    NOP, NOP, NOP
1291*0Sstevel@tonic-gate 	};
1292*0Sstevel@tonic-gate 
1293*0Sstevel@tonic-gate 	/*
1294*0Sstevel@tonic-gate 	 * This is the entry in the interposing trap table for disabled trap
1295*0Sstevel@tonic-gate 	 * table entries.  It simply branches to the actual, underlying trap
1296*0Sstevel@tonic-gate 	 * table entry.  As explained in the "Implementation Details" section
1297*0Sstevel@tonic-gate 	 * of the block comment, all TL>0 traps _must_ use the disabled entry;
1298*0Sstevel@tonic-gate 	 * additional entries may be explicitly disabled through the use
1299*0Sstevel@tonic-gate 	 * of TSTATIOC_ENTRY/TSTATIOC_NOENTRY.
1300*0Sstevel@tonic-gate 	 */
1301*0Sstevel@tonic-gate 	static const uint32_t disabled[TSTAT_ENT_NINSTR] = {
1302*0Sstevel@tonic-gate 	    0x30800000,			/* ba,a addr			*/
1303*0Sstevel@tonic-gate 	    NOP, NOP, NOP, NOP, NOP, NOP, NOP,
1304*0Sstevel@tonic-gate 	};
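	/*
	 * In the loop below, entry "nent" of the interposing table lives at
	 * tcpu_ibase + nent * sizeof (enabled) and the corresponding real
	 * entry lives at KERNELBASE + nent * sizeof (enabled); the "ba,a" in
	 * each template is patched (via DISP22()) with the displacement from
	 * its own location in the interposing table to that real entry, so
	 * an enabled entry costs only the counter update before control
	 * reaches the original handler, and a disabled entry costs only the
	 * branch.
	 */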
1305*0Sstevel@tonic-gate 
1306*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&tstat_lock));
1307*0Sstevel@tonic-gate 
1308*0Sstevel@tonic-gate 	ent = tcpu->tcpu_instr->tinst_traptab;
1309*0Sstevel@tonic-gate 	stat = (uint64_t *)TSTAT_DATA_OFFS(tcpu, tdata_traps);
1310*0Sstevel@tonic-gate 	orig = KERNELBASE;
1311*0Sstevel@tonic-gate 	va = (uintptr_t)tcpu->tcpu_ibase;
1312*0Sstevel@tonic-gate 	en_baoffs = TSTAT_ENABLED_BA * sizeof (uint32_t);
1313*0Sstevel@tonic-gate 	dis_baoffs = TSTAT_DISABLED_BA * sizeof (uint32_t);
1314*0Sstevel@tonic-gate 
1315*0Sstevel@tonic-gate 	for (nent = 0; nent < TSTAT_TOTAL_NENT; nent++) {
1316*0Sstevel@tonic-gate 		if (tstat_enabled[nent]) {
1317*0Sstevel@tonic-gate 			bcopy(enabled, ent, sizeof (enabled));
1318*0Sstevel@tonic-gate 			ent[TSTAT_ENABLED_STATHI] |= HI22(stat);
1319*0Sstevel@tonic-gate 			ent[TSTAT_ENABLED_STATLO_LD] |= LO10(stat);
1320*0Sstevel@tonic-gate 			ent[TSTAT_ENABLED_STATLO_ST] |= LO10(stat);
1321*0Sstevel@tonic-gate 			ent[TSTAT_ENABLED_BA] |= DISP22(va + en_baoffs, orig);
1322*0Sstevel@tonic-gate 		} else {
1323*0Sstevel@tonic-gate 			bcopy(disabled, ent, sizeof (disabled));
1324*0Sstevel@tonic-gate 			ent[TSTAT_DISABLED_BA] |= DISP22(va + dis_baoffs, orig);
1325*0Sstevel@tonic-gate 		}
1326*0Sstevel@tonic-gate 
1327*0Sstevel@tonic-gate 		stat++;
1328*0Sstevel@tonic-gate 		orig += sizeof (enabled);
1329*0Sstevel@tonic-gate 		ent += sizeof (enabled) / sizeof (*ent);
1330*0Sstevel@tonic-gate 		va += sizeof (enabled);
1331*0Sstevel@tonic-gate 	}
1332*0Sstevel@tonic-gate }
1333*0Sstevel@tonic-gate 
1334*0Sstevel@tonic-gate #undef TSTAT_ENABLED_STATHI
1335*0Sstevel@tonic-gate #undef TSTAT_ENABLED_STATLO_LD
1336*0Sstevel@tonic-gate #undef TSTAT_ENABLED_STATLO_ST
1337*0Sstevel@tonic-gate #undef TSTAT_ENABLED_BA
1338*0Sstevel@tonic-gate #undef TSTAT_DISABLED_BA
1339*0Sstevel@tonic-gate 
1340*0Sstevel@tonic-gate static void
1341*0Sstevel@tonic-gate trapstat_setup(processorid_t cpu)
1342*0Sstevel@tonic-gate {
1343*0Sstevel@tonic-gate 	tstat_percpu_t *tcpu = &tstat_percpu[cpu];
1344*0Sstevel@tonic-gate #ifndef sun4v
1345*0Sstevel@tonic-gate 	int i;
1346*0Sstevel@tonic-gate 	caddr_t va;
1347*0Sstevel@tonic-gate 	pfn_t *pfn;
1348*0Sstevel@tonic-gate #endif
1349*0Sstevel@tonic-gate 
1350*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_pfn == NULL);
1351*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_instr == NULL);
1352*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_data == NULL);
1353*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED);
1354*0Sstevel@tonic-gate 	ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED));
1355*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
1356*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&tstat_lock));
1357*0Sstevel@tonic-gate 
1358*0Sstevel@tonic-gate 	/*
1359*0Sstevel@tonic-gate 	 * The lower fifteen bits of the %tba are always read as zero; we must
1360*0Sstevel@tonic-gate 	 * align our instruction base address appropriately.
1361*0Sstevel@tonic-gate 	 */
1362*0Sstevel@tonic-gate #ifndef sun4v
1363*0Sstevel@tonic-gate 	tcpu->tcpu_ibase = (caddr_t)((KERNELBASE - tstat_total_size)
1364*0Sstevel@tonic-gate 		& TSTAT_TBA_MASK);
1365*0Sstevel@tonic-gate 	tcpu->tcpu_dbase = tcpu->tcpu_ibase + TSTAT_INSTR_SIZE;
1366*0Sstevel@tonic-gate 	tcpu->tcpu_vabase = tcpu->tcpu_ibase;
1367*0Sstevel@tonic-gate 
1368*0Sstevel@tonic-gate 	tcpu->tcpu_pfn = vmem_alloc(tstat_arena, tstat_total_pages, VM_SLEEP);
1369*0Sstevel@tonic-gate 	bzero(tcpu->tcpu_pfn, tstat_total_pages);
1370*0Sstevel@tonic-gate 	pfn = tcpu->tcpu_pfn;
1371*0Sstevel@tonic-gate 
1372*0Sstevel@tonic-gate 	tcpu->tcpu_instr = vmem_alloc(tstat_arena, TSTAT_INSTR_SIZE, VM_SLEEP);
1373*0Sstevel@tonic-gate 
1374*0Sstevel@tonic-gate 	va = (caddr_t)tcpu->tcpu_instr;
1375*0Sstevel@tonic-gate 	for (i = 0; i < TSTAT_INSTR_PAGES; i++, va += MMU_PAGESIZE)
1376*0Sstevel@tonic-gate 		*pfn++ = hat_getpfnum(kas.a_hat, va);
1377*0Sstevel@tonic-gate 
1378*0Sstevel@tonic-gate 	/*
1379*0Sstevel@tonic-gate 	 * We must be sure that the pages that we will use to examine the data
1380*0Sstevel@tonic-gate 	 * have the same virtual color as the pages to which the data is being
1381*0Sstevel@tonic-gate 	 * recorded, hence the alignment and phase constraints on the
1382*0Sstevel@tonic-gate 	 * allocation.
1383*0Sstevel@tonic-gate 	 */
1384*0Sstevel@tonic-gate 	tcpu->tcpu_data = vmem_xalloc(tstat_arena, tstat_data_size,
1385*0Sstevel@tonic-gate 	    shm_alignment, (uintptr_t)tcpu->tcpu_dbase & (shm_alignment - 1),
1386*0Sstevel@tonic-gate 	    0, 0, NULL, VM_SLEEP);
1387*0Sstevel@tonic-gate 	bzero(tcpu->tcpu_data, tstat_data_size);
1388*0Sstevel@tonic-gate 	tcpu->tcpu_data->tdata_cpuid = cpu;
1389*0Sstevel@tonic-gate 
1390*0Sstevel@tonic-gate 	va = (caddr_t)tcpu->tcpu_data;
1391*0Sstevel@tonic-gate 	for (i = 0; i < tstat_data_pages; i++, va += MMU_PAGESIZE)
1392*0Sstevel@tonic-gate 		*pfn++ = hat_getpfnum(kas.a_hat, va);
1393*0Sstevel@tonic-gate #else /* sun4v */
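	/*
	 * On sun4v the interposing tables for all CPUs share one physically
	 * contiguous 4MB page (tstat_va, allocated in trapstat_go()):  each
	 * CPU gets a (1 + ~TSTAT_TBA_MASK)-sized slice of it, mapped at the
	 * corresponding offset below KERNELBASE, so a single locked 4MB
	 * mapping covers every CPU's interposing trap table and data.
	 */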
1394*0Sstevel@tonic-gate 	ASSERT(!(tstat_total_size > (1 + ~TSTAT_TBA_MASK)));
1395*0Sstevel@tonic-gate 	tcpu->tcpu_vabase = (caddr_t)(KERNELBASE - MMU_PAGESIZE4M);
1396*0Sstevel@tonic-gate 	tcpu->tcpu_ibase = tcpu->tcpu_vabase + (cpu * (1 + ~TSTAT_TBA_MASK));
1397*0Sstevel@tonic-gate 	tcpu->tcpu_dbase = tcpu->tcpu_ibase + TSTAT_INSTR_SIZE;
1398*0Sstevel@tonic-gate 
1399*0Sstevel@tonic-gate 	tcpu->tcpu_pfn = &tstat_pfn;
1400*0Sstevel@tonic-gate 	tcpu->tcpu_instr = (tstat_instr_t *)(tstat_va + (cpu *
1401*0Sstevel@tonic-gate 		(1 + ~TSTAT_TBA_MASK)));
1402*0Sstevel@tonic-gate 	tcpu->tcpu_data = (tstat_data_t *)(tstat_va + (cpu *
1403*0Sstevel@tonic-gate 		(1 + ~TSTAT_TBA_MASK)) + TSTAT_INSTR_SIZE);
1404*0Sstevel@tonic-gate 	bzero(tcpu->tcpu_data, tstat_data_size);
1405*0Sstevel@tonic-gate 	tcpu->tcpu_data->tdata_cpuid = cpu;
1406*0Sstevel@tonic-gate #endif /* sun4v */
1407*0Sstevel@tonic-gate 
1408*0Sstevel@tonic-gate 	/*
1409*0Sstevel@tonic-gate 	 * Now that we have all of the instruction and data pages allocated,
1410*0Sstevel@tonic-gate 	 * make the trap table from scratch.
1411*0Sstevel@tonic-gate 	 */
1412*0Sstevel@tonic-gate 	trapstat_make_traptab(tcpu);
1413*0Sstevel@tonic-gate 
1414*0Sstevel@tonic-gate 	if (tstat_options & TSTAT_OPT_TLBDATA) {
1415*0Sstevel@tonic-gate 		/*
1416*0Sstevel@tonic-gate 		 * TLB Statistics have been specified; set up the I- and D-TLB
1417*0Sstevel@tonic-gate 		 * entries and corresponding TLB return entries.
1418*0Sstevel@tonic-gate 		 */
1419*0Sstevel@tonic-gate 		trapstat_tlbent(tcpu, TSTAT_ENT_ITLBMISS);
1420*0Sstevel@tonic-gate 		trapstat_tlbent(tcpu, TSTAT_ENT_DTLBMISS);
1421*0Sstevel@tonic-gate 	}
1422*0Sstevel@tonic-gate 
1423*0Sstevel@tonic-gate 	tcpu->tcpu_flags |= TSTAT_CPU_ALLOCATED;
1424*0Sstevel@tonic-gate 
1425*0Sstevel@tonic-gate 	/*
1426*0Sstevel@tonic-gate 	 * Finally, get the target CPU to load the locked pages into its TLBs.
1427*0Sstevel@tonic-gate 	 */
1428*0Sstevel@tonic-gate 	xc_one(cpu, (xcfunc_t *)trapstat_load_tlb, 0, 0);
1429*0Sstevel@tonic-gate }
1430*0Sstevel@tonic-gate 
1431*0Sstevel@tonic-gate static void
1432*0Sstevel@tonic-gate trapstat_teardown(processorid_t cpu)
1433*0Sstevel@tonic-gate {
1434*0Sstevel@tonic-gate 	tstat_percpu_t *tcpu = &tstat_percpu[cpu];
1435*0Sstevel@tonic-gate #ifndef sun4v
1436*0Sstevel@tonic-gate 	int i;
1437*0Sstevel@tonic-gate #endif
1438*0Sstevel@tonic-gate 	caddr_t va = tcpu->tcpu_vabase;
1439*0Sstevel@tonic-gate 
1440*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_pfn != NULL);
1441*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_instr != NULL);
1442*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_data != NULL);
1443*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED);
1444*0Sstevel@tonic-gate 	ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
1445*0Sstevel@tonic-gate 	ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED));
1446*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
1447*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&tstat_lock));
1448*0Sstevel@tonic-gate 
1449*0Sstevel@tonic-gate #ifndef sun4v
1450*0Sstevel@tonic-gate 	vmem_free(tstat_arena, tcpu->tcpu_pfn, tstat_total_pages);
1451*0Sstevel@tonic-gate 	vmem_free(tstat_arena, tcpu->tcpu_instr, TSTAT_INSTR_SIZE);
1452*0Sstevel@tonic-gate 	vmem_free(tstat_arena, tcpu->tcpu_data, tstat_data_size);
1453*0Sstevel@tonic-gate 
1454*0Sstevel@tonic-gate 	for (i = 0; i < tstat_total_pages; i++, va += MMU_PAGESIZE) {
1455*0Sstevel@tonic-gate 		xt_one(cpu, vtag_flushpage_tl1, (uint64_t)va, KCONTEXT);
1456*0Sstevel@tonic-gate 	}
1457*0Sstevel@tonic-gate #else
1458*0Sstevel@tonic-gate 	xt_one(cpu, vtag_unmap_perm_tl1, (uint64_t)va, KCONTEXT);
1459*0Sstevel@tonic-gate #endif
1460*0Sstevel@tonic-gate 
1461*0Sstevel@tonic-gate 	tcpu->tcpu_pfn = NULL;
1462*0Sstevel@tonic-gate 	tcpu->tcpu_instr = NULL;
1463*0Sstevel@tonic-gate 	tcpu->tcpu_data = NULL;
1464*0Sstevel@tonic-gate 	tcpu->tcpu_flags &= ~TSTAT_CPU_ALLOCATED;
1465*0Sstevel@tonic-gate }
1466*0Sstevel@tonic-gate 
1467*0Sstevel@tonic-gate static int
1468*0Sstevel@tonic-gate trapstat_go()
1469*0Sstevel@tonic-gate {
1470*0Sstevel@tonic-gate 	cpu_t *cp;
1471*0Sstevel@tonic-gate 
1472*0Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
1473*0Sstevel@tonic-gate 	mutex_enter(&tstat_lock);
1474*0Sstevel@tonic-gate 
1475*0Sstevel@tonic-gate 	if (tstat_running) {
1476*0Sstevel@tonic-gate 		mutex_exit(&tstat_lock);
1477*0Sstevel@tonic-gate 		mutex_exit(&cpu_lock);
1478*0Sstevel@tonic-gate 		return (EBUSY);
1479*0Sstevel@tonic-gate 	}
1480*0Sstevel@tonic-gate 
1481*0Sstevel@tonic-gate #ifdef sun4v
1482*0Sstevel@tonic-gate 	/*
1483*0Sstevel@tonic-gate 	 * Allocate large page to hold interposing tables
1484*0Sstevel@tonic-gate 	 */
1485*0Sstevel@tonic-gate 	tstat_va = contig_mem_alloc(MMU_PAGESIZE4M);
1486*0Sstevel@tonic-gate 	tstat_pfn = va_to_pfn(tstat_va);
1487*0Sstevel@tonic-gate 	if (tstat_pfn == PFN_INVALID) {
		/* Drop the locks taken above before bailing out. */
		if (tstat_va != NULL)
			contig_mem_free(tstat_va, MMU_PAGESIZE4M);
		mutex_exit(&tstat_lock);
		mutex_exit(&cpu_lock);
		return (EAGAIN);
1490*0Sstevel@tonic-gate 	}
1491*0Sstevel@tonic-gate #endif
1492*0Sstevel@tonic-gate 
1493*0Sstevel@tonic-gate 	/*
1494*0Sstevel@tonic-gate 	 * First, perform any necessary hot patching.
1495*0Sstevel@tonic-gate 	 */
1496*0Sstevel@tonic-gate 	trapstat_hotpatch();
1497*0Sstevel@tonic-gate 
1498*0Sstevel@tonic-gate 	/*
1499*0Sstevel@tonic-gate 	 * Allocate the resources we'll need to measure probe effect.
1500*0Sstevel@tonic-gate 	 */
1501*0Sstevel@tonic-gate 	trapstat_probe_alloc();
1502*0Sstevel@tonic-gate 
1504*0Sstevel@tonic-gate 	cp = cpu_list;
1505*0Sstevel@tonic-gate 	do {
1506*0Sstevel@tonic-gate 		if (!(tstat_percpu[cp->cpu_id].tcpu_flags & TSTAT_CPU_SELECTED))
1507*0Sstevel@tonic-gate 			continue;
1508*0Sstevel@tonic-gate 
1509*0Sstevel@tonic-gate 		trapstat_setup(cp->cpu_id);
1510*0Sstevel@tonic-gate 
1511*0Sstevel@tonic-gate 		/*
1512*0Sstevel@tonic-gate 		 * Note that due to trapstat_probe()'s use of global data,
1513*0Sstevel@tonic-gate 		 * we determine the probe effect on each CPU serially instead
1514*0Sstevel@tonic-gate 		 * of in parallel with an xc_all().
1515*0Sstevel@tonic-gate 		 */
1516*0Sstevel@tonic-gate 		xc_one(cp->cpu_id, (xcfunc_t *)trapstat_probe, 0, 0);
1517*0Sstevel@tonic-gate 	} while ((cp = cp->cpu_next) != cpu_list);
1518*0Sstevel@tonic-gate 
1519*0Sstevel@tonic-gate 	xc_all((xcfunc_t *)trapstat_enable, 0, 0);
1520*0Sstevel@tonic-gate 
1521*0Sstevel@tonic-gate 	trapstat_probe_free();
1522*0Sstevel@tonic-gate 	tstat_running = 1;
1523*0Sstevel@tonic-gate 	mutex_exit(&tstat_lock);
1524*0Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
1525*0Sstevel@tonic-gate 
1526*0Sstevel@tonic-gate 	return (0);
1527*0Sstevel@tonic-gate }
1528*0Sstevel@tonic-gate 
1529*0Sstevel@tonic-gate static int
1530*0Sstevel@tonic-gate trapstat_stop()
1531*0Sstevel@tonic-gate {
1532*0Sstevel@tonic-gate 	int i;
1533*0Sstevel@tonic-gate 
1534*0Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
1535*0Sstevel@tonic-gate 	mutex_enter(&tstat_lock);
1536*0Sstevel@tonic-gate 	if (!tstat_running) {
1537*0Sstevel@tonic-gate 		mutex_exit(&tstat_lock);
1538*0Sstevel@tonic-gate 		mutex_exit(&cpu_lock);
1539*0Sstevel@tonic-gate 		return (ENXIO);
1540*0Sstevel@tonic-gate 	}
1541*0Sstevel@tonic-gate 
1542*0Sstevel@tonic-gate 	xc_all((xcfunc_t *)trapstat_disable, 0, 0);
1543*0Sstevel@tonic-gate 
1544*0Sstevel@tonic-gate 	for (i = 0; i <= max_cpuid; i++) {
1545*0Sstevel@tonic-gate 		if (tstat_percpu[i].tcpu_flags & TSTAT_CPU_ALLOCATED)
1546*0Sstevel@tonic-gate 			trapstat_teardown(i);
1547*0Sstevel@tonic-gate 	}
1548*0Sstevel@tonic-gate 
1549*0Sstevel@tonic-gate #ifdef sun4v
1550*0Sstevel@tonic-gate 	contig_mem_free(tstat_va, MMU_PAGESIZE4M);
1551*0Sstevel@tonic-gate #endif
1552*0Sstevel@tonic-gate 	trapstat_hotpatch();
1553*0Sstevel@tonic-gate 	tstat_running = 0;
1554*0Sstevel@tonic-gate 	mutex_exit(&tstat_lock);
1555*0Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
1556*0Sstevel@tonic-gate 
1557*0Sstevel@tonic-gate 	return (0);
1558*0Sstevel@tonic-gate }
1559*0Sstevel@tonic-gate 
1560*0Sstevel@tonic-gate /*
1561*0Sstevel@tonic-gate  * This is trapstat's DR CPU configuration callback.  It's called (with
1562*0Sstevel@tonic-gate  * cpu_lock held) to unconfigure a newly powered-off CPU, or to configure a
1563*0Sstevel@tonic-gate  * powered-off CPU that is to be brought into the system.  We need only take
1564*0Sstevel@tonic-gate  * action in the unconfigure case:  because a powered-off CPU will have its
1565*0Sstevel@tonic-gate  * trap table restored to KERNELBASE if it is ever powered back on, we must
1566*0Sstevel@tonic-gate  * update the flags to reflect that trapstat is no longer enabled on the
1567*0Sstevel@tonic-gate  * powered-off CPU.  Note that this means that a TSTAT_CPU_ENABLED CPU that
1568*0Sstevel@tonic-gate  * is unconfigured/powered off and later powered back on/reconfigured will
1569*0Sstevel@tonic-gate  * _not_ be re-TSTAT_CPU_ENABLED.
1570*0Sstevel@tonic-gate  */
1571*0Sstevel@tonic-gate static int
1572*0Sstevel@tonic-gate trapstat_cpu_setup(cpu_setup_t what, processorid_t cpu)
1573*0Sstevel@tonic-gate {
1574*0Sstevel@tonic-gate 	tstat_percpu_t *tcpu = &tstat_percpu[cpu];
1575*0Sstevel@tonic-gate 
1576*0Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&cpu_lock));
1577*0Sstevel@tonic-gate 	mutex_enter(&tstat_lock);
1578*0Sstevel@tonic-gate 
1579*0Sstevel@tonic-gate 	if (!tstat_running) {
1580*0Sstevel@tonic-gate 		mutex_exit(&tstat_lock);
1581*0Sstevel@tonic-gate 		return (0);
1582*0Sstevel@tonic-gate 	}
1583*0Sstevel@tonic-gate 
1584*0Sstevel@tonic-gate 	switch (what) {
1585*0Sstevel@tonic-gate 	case CPU_CONFIG:
1586*0Sstevel@tonic-gate 		ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED));
1587*0Sstevel@tonic-gate 		break;
1588*0Sstevel@tonic-gate 
1589*0Sstevel@tonic-gate 	case CPU_UNCONFIG:
1590*0Sstevel@tonic-gate 		if (tcpu->tcpu_flags & TSTAT_CPU_ENABLED)
1591*0Sstevel@tonic-gate 			tcpu->tcpu_flags &= ~TSTAT_CPU_ENABLED;
1592*0Sstevel@tonic-gate 		break;
1593*0Sstevel@tonic-gate 
1594*0Sstevel@tonic-gate 	default:
1595*0Sstevel@tonic-gate 		break;
1596*0Sstevel@tonic-gate 	}
1597*0Sstevel@tonic-gate 
1598*0Sstevel@tonic-gate 	mutex_exit(&tstat_lock);
1599*0Sstevel@tonic-gate 	return (0);
1600*0Sstevel@tonic-gate }
1601*0Sstevel@tonic-gate 
1602*0Sstevel@tonic-gate /*
1603*0Sstevel@tonic-gate  * This is called before a CPR suspend and after a CPR resume.  We don't have
1604*0Sstevel@tonic-gate  * anything to do before a suspend, but after a restart we must restore the
1605*0Sstevel@tonic-gate  * trap table to be our interposing trap table.  However, we don't actually
1606*0Sstevel@tonic-gate  * know whether or not the CPUs have been powered off -- this routine may be
1607*0Sstevel@tonic-gate  * called while restoring from a failed CPR suspend.  We thus run through each
1608*0Sstevel@tonic-gate  * TSTAT_CPU_ENABLED CPU, and explicitly destroy and reestablish its
1609*0Sstevel@tonic-gate  * interposing trap table.  This assures that our state is correct regardless
1610*0Sstevel@tonic-gate  * of whether or not the CPU has been newly powered on.
1611*0Sstevel@tonic-gate  */
1612*0Sstevel@tonic-gate /*ARGSUSED*/
1613*0Sstevel@tonic-gate static boolean_t
1614*0Sstevel@tonic-gate trapstat_cpr(void *arg, int code)
1615*0Sstevel@tonic-gate {
1616*0Sstevel@tonic-gate 	cpu_t *cp;
1617*0Sstevel@tonic-gate 
1618*0Sstevel@tonic-gate 	if (code == CB_CODE_CPR_CHKPT)
1619*0Sstevel@tonic-gate 		return (B_TRUE);
1620*0Sstevel@tonic-gate 
1621*0Sstevel@tonic-gate 	ASSERT(code == CB_CODE_CPR_RESUME);
1622*0Sstevel@tonic-gate 
1623*0Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
1624*0Sstevel@tonic-gate 	mutex_enter(&tstat_lock);
1625*0Sstevel@tonic-gate 
1626*0Sstevel@tonic-gate 	if (!tstat_running) {
1627*0Sstevel@tonic-gate 		mutex_exit(&tstat_lock);
1628*0Sstevel@tonic-gate 		mutex_exit(&cpu_lock);
1629*0Sstevel@tonic-gate 		return (B_TRUE);
1630*0Sstevel@tonic-gate 	}
1631*0Sstevel@tonic-gate 
1632*0Sstevel@tonic-gate 	cp = cpu_list;
1633*0Sstevel@tonic-gate 	do {
1634*0Sstevel@tonic-gate 		tstat_percpu_t *tcpu = &tstat_percpu[cp->cpu_id];
1635*0Sstevel@tonic-gate 
1636*0Sstevel@tonic-gate 		if (!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED))
1637*0Sstevel@tonic-gate 			continue;
1638*0Sstevel@tonic-gate 
1639*0Sstevel@tonic-gate 		ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED);
1640*0Sstevel@tonic-gate 		ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
1641*0Sstevel@tonic-gate 
1642*0Sstevel@tonic-gate 		xc_one(cp->cpu_id, (xcfunc_t *)trapstat_disable, 0, 0);
1643*0Sstevel@tonic-gate 		ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED));
1644*0Sstevel@tonic-gate 
1645*0Sstevel@tonic-gate 		/*
1646*0Sstevel@tonic-gate 		 * Preserve this CPU's data in tstat_buffer and rip down its
1647*0Sstevel@tonic-gate 		 * interposing trap table.
1648*0Sstevel@tonic-gate 		 */
1649*0Sstevel@tonic-gate 		bcopy(tcpu->tcpu_data, tstat_buffer, tstat_data_t_size);
1650*0Sstevel@tonic-gate 		trapstat_teardown(cp->cpu_id);
1651*0Sstevel@tonic-gate 		ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED));
1652*0Sstevel@tonic-gate 
1653*0Sstevel@tonic-gate 		/*
1654*0Sstevel@tonic-gate 		 * Reestablish the interposing trap table and restore the old
1655*0Sstevel@tonic-gate 		 * data.
1656*0Sstevel@tonic-gate 		 */
1657*0Sstevel@tonic-gate 		trapstat_setup(cp->cpu_id);
1658*0Sstevel@tonic-gate 		ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
1659*0Sstevel@tonic-gate 		bcopy(tstat_buffer, tcpu->tcpu_data, tstat_data_t_size);
1660*0Sstevel@tonic-gate 
1661*0Sstevel@tonic-gate 		xc_one(cp->cpu_id, (xcfunc_t *)trapstat_enable, 0, 0);
1662*0Sstevel@tonic-gate 	} while ((cp = cp->cpu_next) != cpu_list);
1663*0Sstevel@tonic-gate 
1664*0Sstevel@tonic-gate 	mutex_exit(&tstat_lock);
1665*0Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
1666*0Sstevel@tonic-gate 
1667*0Sstevel@tonic-gate 	return (B_TRUE);
1668*0Sstevel@tonic-gate }
1669*0Sstevel@tonic-gate 
1670*0Sstevel@tonic-gate /*ARGSUSED*/
1671*0Sstevel@tonic-gate static int
1672*0Sstevel@tonic-gate trapstat_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
1673*0Sstevel@tonic-gate {
1674*0Sstevel@tonic-gate 	int i;
1675*0Sstevel@tonic-gate 
1676*0Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
1677*0Sstevel@tonic-gate 	mutex_enter(&tstat_lock);
1678*0Sstevel@tonic-gate 	if (tstat_open != 0) {
1679*0Sstevel@tonic-gate 		mutex_exit(&tstat_lock);
1680*0Sstevel@tonic-gate 		mutex_exit(&cpu_lock);
1681*0Sstevel@tonic-gate 		return (EBUSY);
1682*0Sstevel@tonic-gate 	}
1683*0Sstevel@tonic-gate 
1684*0Sstevel@tonic-gate 	/*
1685*0Sstevel@tonic-gate 	 * Register this in open() rather than in attach() to prevent deadlock
1686*0Sstevel@tonic-gate 	 * with DR code. During attach, I/O device tree locks are grabbed
	 * before trapstat_attach() is invoked -- registering in attach
	 * would result in the lock order:  device tree lock, cpu_lock.
	 * DR code, however, requires that cpu_lock be acquired before
1690*0Sstevel@tonic-gate 	 * device tree locks.
1691*0Sstevel@tonic-gate 	 */
1692*0Sstevel@tonic-gate 	ASSERT(!tstat_running);
1693*0Sstevel@tonic-gate 	register_cpu_setup_func((cpu_setup_func_t *)trapstat_cpu_setup, NULL);
1694*0Sstevel@tonic-gate 
1695*0Sstevel@tonic-gate 	/*
1696*0Sstevel@tonic-gate 	 * Clear all options.  And until specific CPUs are specified, we'll
1697*0Sstevel@tonic-gate 	 * mark all CPUs as selected.
1698*0Sstevel@tonic-gate 	 */
1699*0Sstevel@tonic-gate 	tstat_options = 0;
1700*0Sstevel@tonic-gate 
1701*0Sstevel@tonic-gate 	for (i = 0; i <= max_cpuid; i++)
1702*0Sstevel@tonic-gate 		tstat_percpu[i].tcpu_flags |= TSTAT_CPU_SELECTED;
1703*0Sstevel@tonic-gate 
1704*0Sstevel@tonic-gate 	/*
1705*0Sstevel@tonic-gate 	 * By default, all traps at TL=0 are enabled.  Traps at TL>0 must
1706*0Sstevel@tonic-gate 	 * be disabled.
1707*0Sstevel@tonic-gate 	 */
1708*0Sstevel@tonic-gate 	for (i = 0; i < TSTAT_TOTAL_NENT; i++)
1709*0Sstevel@tonic-gate 		tstat_enabled[i] = i < TSTAT_NENT ? 1 : 0;
1710*0Sstevel@tonic-gate 
1711*0Sstevel@tonic-gate 	tstat_open = 1;
1712*0Sstevel@tonic-gate 	mutex_exit(&tstat_lock);
1713*0Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
1714*0Sstevel@tonic-gate 
1715*0Sstevel@tonic-gate 	return (0);
1716*0Sstevel@tonic-gate }
1717*0Sstevel@tonic-gate 
1718*0Sstevel@tonic-gate /*ARGSUSED*/
1719*0Sstevel@tonic-gate static int
1720*0Sstevel@tonic-gate trapstat_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
1721*0Sstevel@tonic-gate {
1722*0Sstevel@tonic-gate 	(void) trapstat_stop();
1723*0Sstevel@tonic-gate 
1724*0Sstevel@tonic-gate 	ASSERT(!tstat_running);
1725*0Sstevel@tonic-gate 
1726*0Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
1727*0Sstevel@tonic-gate 	unregister_cpu_setup_func((cpu_setup_func_t *)trapstat_cpu_setup, NULL);
1728*0Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
1729*0Sstevel@tonic-gate 
1730*0Sstevel@tonic-gate 	tstat_open = 0;
1731*0Sstevel@tonic-gate 	return (DDI_SUCCESS);
1732*0Sstevel@tonic-gate }
1733*0Sstevel@tonic-gate 
1734*0Sstevel@tonic-gate static int
1735*0Sstevel@tonic-gate trapstat_option(int option)
1736*0Sstevel@tonic-gate {
1737*0Sstevel@tonic-gate 	mutex_enter(&tstat_lock);
1738*0Sstevel@tonic-gate 
1739*0Sstevel@tonic-gate 	if (tstat_running) {
1740*0Sstevel@tonic-gate 		mutex_exit(&tstat_lock);
1741*0Sstevel@tonic-gate 		return (EBUSY);
1742*0Sstevel@tonic-gate 	}
1743*0Sstevel@tonic-gate 
1744*0Sstevel@tonic-gate 	tstat_options |= option;
1745*0Sstevel@tonic-gate 	mutex_exit(&tstat_lock);
1746*0Sstevel@tonic-gate 
1747*0Sstevel@tonic-gate 	return (0);
1748*0Sstevel@tonic-gate }
1749*0Sstevel@tonic-gate 
1750*0Sstevel@tonic-gate /*ARGSUSED*/
1751*0Sstevel@tonic-gate static int
1752*0Sstevel@tonic-gate trapstat_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *crd, int *rval)
1753*0Sstevel@tonic-gate {
1754*0Sstevel@tonic-gate 	int i, j, out;
1755*0Sstevel@tonic-gate 	size_t dsize;
1756*0Sstevel@tonic-gate 
1757*0Sstevel@tonic-gate 	switch (cmd) {
1758*0Sstevel@tonic-gate 	case TSTATIOC_GO:
1759*0Sstevel@tonic-gate 		return (trapstat_go());
1760*0Sstevel@tonic-gate 
1761*0Sstevel@tonic-gate 	case TSTATIOC_NOGO:
1762*0Sstevel@tonic-gate 		return (trapstat_option(TSTAT_OPT_NOGO));
1763*0Sstevel@tonic-gate 
1764*0Sstevel@tonic-gate 	case TSTATIOC_STOP:
1765*0Sstevel@tonic-gate 		return (trapstat_stop());
1766*0Sstevel@tonic-gate 
1767*0Sstevel@tonic-gate 	case TSTATIOC_CPU:
1768*0Sstevel@tonic-gate 		if (arg < 0 || arg > max_cpuid)
1769*0Sstevel@tonic-gate 			return (EINVAL);
1770*0Sstevel@tonic-gate 		/*FALLTHROUGH*/
1771*0Sstevel@tonic-gate 
1772*0Sstevel@tonic-gate 	case TSTATIOC_NOCPU:
1773*0Sstevel@tonic-gate 		mutex_enter(&tstat_lock);
1774*0Sstevel@tonic-gate 
1775*0Sstevel@tonic-gate 		if (tstat_running) {
1776*0Sstevel@tonic-gate 			mutex_exit(&tstat_lock);
1777*0Sstevel@tonic-gate 			return (EBUSY);
1778*0Sstevel@tonic-gate 		}
1779*0Sstevel@tonic-gate 
1780*0Sstevel@tonic-gate 		/*
1781*0Sstevel@tonic-gate 		 * If this is the first CPU to be specified (or if we are
		 * being asked to explicitly de-select CPUs), de-select them all.
1783*0Sstevel@tonic-gate 		 */
1784*0Sstevel@tonic-gate 		if (!(tstat_options & TSTAT_OPT_CPU) || cmd == TSTATIOC_NOCPU) {
1785*0Sstevel@tonic-gate 			tstat_options |= TSTAT_OPT_CPU;
1786*0Sstevel@tonic-gate 
1787*0Sstevel@tonic-gate 			for (i = 0; i <= max_cpuid; i++) {
1788*0Sstevel@tonic-gate 				tstat_percpu_t *tcpu = &tstat_percpu[i];
1789*0Sstevel@tonic-gate 
1790*0Sstevel@tonic-gate 				ASSERT(cmd == TSTATIOC_NOCPU ||
1791*0Sstevel@tonic-gate 				    (tcpu->tcpu_flags & TSTAT_CPU_SELECTED));
1792*0Sstevel@tonic-gate 				tcpu->tcpu_flags &= ~TSTAT_CPU_SELECTED;
1793*0Sstevel@tonic-gate 			}
1794*0Sstevel@tonic-gate 		}
1795*0Sstevel@tonic-gate 
1796*0Sstevel@tonic-gate 		if (cmd == TSTATIOC_CPU)
1797*0Sstevel@tonic-gate 			tstat_percpu[arg].tcpu_flags |= TSTAT_CPU_SELECTED;
1798*0Sstevel@tonic-gate 
1799*0Sstevel@tonic-gate 		mutex_exit(&tstat_lock);
1800*0Sstevel@tonic-gate 
1801*0Sstevel@tonic-gate 		return (0);
1802*0Sstevel@tonic-gate 
1803*0Sstevel@tonic-gate 	case TSTATIOC_ENTRY:
1804*0Sstevel@tonic-gate 		mutex_enter(&tstat_lock);
1805*0Sstevel@tonic-gate 
1806*0Sstevel@tonic-gate 		if (tstat_running) {
1807*0Sstevel@tonic-gate 			mutex_exit(&tstat_lock);
1808*0Sstevel@tonic-gate 			return (EBUSY);
1809*0Sstevel@tonic-gate 		}
1810*0Sstevel@tonic-gate 
1811*0Sstevel@tonic-gate 		if (arg >= TSTAT_NENT || arg < 0) {
1812*0Sstevel@tonic-gate 			mutex_exit(&tstat_lock);
1813*0Sstevel@tonic-gate 			return (EINVAL);
1814*0Sstevel@tonic-gate 		}
1815*0Sstevel@tonic-gate 
1816*0Sstevel@tonic-gate 		if (!(tstat_options & TSTAT_OPT_ENTRY)) {
1817*0Sstevel@tonic-gate 			/*
1818*0Sstevel@tonic-gate 			 * If this is the first entry that we are explicitly
1819*0Sstevel@tonic-gate 			 * enabling, explicitly disable every TL=0 entry.
1820*0Sstevel@tonic-gate 			 */
1821*0Sstevel@tonic-gate 			for (i = 0; i < TSTAT_NENT; i++)
1822*0Sstevel@tonic-gate 				tstat_enabled[i] = 0;
1823*0Sstevel@tonic-gate 
1824*0Sstevel@tonic-gate 			tstat_options |= TSTAT_OPT_ENTRY;
1825*0Sstevel@tonic-gate 		}
1826*0Sstevel@tonic-gate 
1827*0Sstevel@tonic-gate 		tstat_enabled[arg] = 1;
1828*0Sstevel@tonic-gate 		mutex_exit(&tstat_lock);
1829*0Sstevel@tonic-gate 		return (0);
1830*0Sstevel@tonic-gate 
1831*0Sstevel@tonic-gate 	case TSTATIOC_NOENTRY:
1832*0Sstevel@tonic-gate 		mutex_enter(&tstat_lock);
1833*0Sstevel@tonic-gate 
1834*0Sstevel@tonic-gate 		if (tstat_running) {
1835*0Sstevel@tonic-gate 			mutex_exit(&tstat_lock);
1836*0Sstevel@tonic-gate 			return (EBUSY);
1837*0Sstevel@tonic-gate 		}
1838*0Sstevel@tonic-gate 
1839*0Sstevel@tonic-gate 		for (i = 0; i < TSTAT_NENT; i++)
1840*0Sstevel@tonic-gate 			tstat_enabled[i] = 0;
1841*0Sstevel@tonic-gate 
1842*0Sstevel@tonic-gate 		mutex_exit(&tstat_lock);
1843*0Sstevel@tonic-gate 		return (0);
1844*0Sstevel@tonic-gate 
1845*0Sstevel@tonic-gate 	case TSTATIOC_READ:
1846*0Sstevel@tonic-gate 		mutex_enter(&tstat_lock);
1847*0Sstevel@tonic-gate 
1848*0Sstevel@tonic-gate 		if (tstat_options & TSTAT_OPT_TLBDATA) {
1849*0Sstevel@tonic-gate 			dsize = tstat_data_t_exported_size;
1850*0Sstevel@tonic-gate 		} else {
1851*0Sstevel@tonic-gate 			dsize = sizeof (tstat_data_t);
1852*0Sstevel@tonic-gate 		}
1853*0Sstevel@tonic-gate 
1854*0Sstevel@tonic-gate 		for (i = 0, out = 0; i <= max_cpuid; i++) {
1855*0Sstevel@tonic-gate 			tstat_percpu_t *tcpu = &tstat_percpu[i];
1856*0Sstevel@tonic-gate 
1857*0Sstevel@tonic-gate 			if (!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED))
1858*0Sstevel@tonic-gate 				continue;
1859*0Sstevel@tonic-gate 
1860*0Sstevel@tonic-gate 			ASSERT(tcpu->tcpu_flags & TSTAT_CPU_SELECTED);
1861*0Sstevel@tonic-gate 			ASSERT(tcpu->tcpu_flags & TSTAT_CPU_ALLOCATED);
1862*0Sstevel@tonic-gate 
1863*0Sstevel@tonic-gate 			tstat_buffer->tdata_cpuid = -1;
1864*0Sstevel@tonic-gate 			xc_one(i, (xcfunc_t *)trapstat_snapshot, 0, 0);
1865*0Sstevel@tonic-gate 
1866*0Sstevel@tonic-gate 			if (tstat_buffer->tdata_cpuid == -1) {
1867*0Sstevel@tonic-gate 				/*
1868*0Sstevel@tonic-gate 				 * This CPU is not currently responding to
1869*0Sstevel@tonic-gate 				 * cross calls; we have caught it while it is
1870*0Sstevel@tonic-gate 				 * being unconfigured.  We'll drop tstat_lock
1871*0Sstevel@tonic-gate 				 * and pick up and drop cpu_lock.  By the
1872*0Sstevel@tonic-gate 				 * time we acquire cpu_lock, the DR operation
1873*0Sstevel@tonic-gate 				 * will appear consistent and we can assert
1874*0Sstevel@tonic-gate 				 * that trapstat_cpu_setup() has cleared
1875*0Sstevel@tonic-gate 				 * TSTAT_CPU_ENABLED.
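				 * (The DR operation itself is performed
				 * under cpu_lock, so blocking on cpu_lock
				 * serializes us behind it.)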
1876*0Sstevel@tonic-gate 				 */
1877*0Sstevel@tonic-gate 				mutex_exit(&tstat_lock);
1878*0Sstevel@tonic-gate 				mutex_enter(&cpu_lock);
1879*0Sstevel@tonic-gate 				mutex_exit(&cpu_lock);
1880*0Sstevel@tonic-gate 				mutex_enter(&tstat_lock);
1881*0Sstevel@tonic-gate 				ASSERT(!(tcpu->tcpu_flags & TSTAT_CPU_ENABLED));
1882*0Sstevel@tonic-gate 				continue;
1883*0Sstevel@tonic-gate 			}
1884*0Sstevel@tonic-gate 
1885*0Sstevel@tonic-gate 			/*
1886*0Sstevel@tonic-gate 			 * Need to compensate for the difference between page
1887*0Sstevel@tonic-gate 			 * sizes exported to users and page sizes available
1888*0Sstevel@tonic-gate 			 * within the kernel.
1889*0Sstevel@tonic-gate 			 */
1890*0Sstevel@tonic-gate 			if ((tstat_options & TSTAT_OPT_TLBDATA) &&
1891*0Sstevel@tonic-gate 			    (tstat_pgszs != tstat_user_pgszs)) {
1892*0Sstevel@tonic-gate 				tstat_pgszdata_t *tp;
1893*0Sstevel@tonic-gate 				uint_t szc;
1894*0Sstevel@tonic-gate 
1895*0Sstevel@tonic-gate 				tp = &tstat_buffer->tdata_pgsz[0];
1896*0Sstevel@tonic-gate 				for (j = 0; j < tstat_user_pgszs; j++) {
1897*0Sstevel@tonic-gate 					if ((szc = USERSZC_2_SZC(j)) != j) {
1898*0Sstevel@tonic-gate 						bcopy(&tp[szc], &tp[j],
1899*0Sstevel@tonic-gate 						    sizeof (tstat_pgszdata_t));
1900*0Sstevel@tonic-gate 					}
1901*0Sstevel@tonic-gate 				}
1902*0Sstevel@tonic-gate 			}
1903*0Sstevel@tonic-gate 
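			/*
			 * In the TLB-data case, dsize was set above to
			 * tstat_data_t_exported_size, so the copyout below
			 * hands the consumer exactly tstat_user_pgszs
			 * page-size entries -- the ones just compacted to
			 * the front of tdata_pgsz[].
			 */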
1904*0Sstevel@tonic-gate 			if (copyout(tstat_buffer, (void *)arg, dsize) != 0) {
1905*0Sstevel@tonic-gate 				mutex_exit(&tstat_lock);
1906*0Sstevel@tonic-gate 				return (EFAULT);
1907*0Sstevel@tonic-gate 			}
1908*0Sstevel@tonic-gate 
1909*0Sstevel@tonic-gate 			out++;
1910*0Sstevel@tonic-gate 			arg += dsize;
1911*0Sstevel@tonic-gate 		}
1912*0Sstevel@tonic-gate 
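		/*
		 * If fewer than max_cpuid + 1 records were copied out (that
		 * is, some CPUs were not enabled), write a tdata_cpuid of -1
		 * into the slot following the last record so the consumer
		 * can tell where the valid data ends.
		 */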
1913*0Sstevel@tonic-gate 		if (out != max_cpuid + 1) {
1914*0Sstevel@tonic-gate 			processorid_t cpuid = -1;
1915*0Sstevel@tonic-gate 			arg += offsetof(tstat_data_t, tdata_cpuid);
1916*0Sstevel@tonic-gate 
1917*0Sstevel@tonic-gate 			if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) {
1918*0Sstevel@tonic-gate 				mutex_exit(&tstat_lock);
1919*0Sstevel@tonic-gate 				return (EFAULT);
1920*0Sstevel@tonic-gate 			}
1921*0Sstevel@tonic-gate 		}
1922*0Sstevel@tonic-gate 
1923*0Sstevel@tonic-gate 		mutex_exit(&tstat_lock);
1924*0Sstevel@tonic-gate 
1925*0Sstevel@tonic-gate 		return (0);
1926*0Sstevel@tonic-gate 
1927*0Sstevel@tonic-gate 	case TSTATIOC_TLBDATA:
1928*0Sstevel@tonic-gate 		return (trapstat_option(TSTAT_OPT_TLBDATA));
1929*0Sstevel@tonic-gate 
1930*0Sstevel@tonic-gate 	default:
1931*0Sstevel@tonic-gate 		break;
1932*0Sstevel@tonic-gate 	}
1933*0Sstevel@tonic-gate 
1934*0Sstevel@tonic-gate 	return (ENOTTY);
1935*0Sstevel@tonic-gate }
1936*0Sstevel@tonic-gate 
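/*
 * For illustration, the following is a minimal sketch of how a privileged
 * consumer might drive the ioctl interface above.  It is a sketch only:
 * the /dev/trapstat path, the use of sysconf(_SC_CPUID_MAX) to size the
 * buffer, the choice of 0x68 as an example trap-table entry, and the
 * TSTATIOC_GO/TSTATIOC_STOP commands (handled earlier in this file but not
 * shown in this excerpt) are assumptions, not guarantees.  Each enabled CPU
 * contributes one tstat_data_t record to TSTATIOC_READ; a record with a
 * tdata_cpuid of -1 terminates the data.
 *
 *	#include <sys/trapstat.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int
 *	main(void)
 *	{
 *		long ncpu = sysconf(_SC_CPUID_MAX) + 1;
 *		tstat_data_t *buf = calloc(ncpu, sizeof (tstat_data_t));
 *		tstat_data_t *tp;
 *		int fd = open("/dev/trapstat", O_RDONLY);
 *
 *		if (fd == -1 || buf == NULL)
 *			exit(1);
 *
 *		(void) ioctl(fd, TSTATIOC_ENTRY, 0x68);
 *		(void) ioctl(fd, TSTATIOC_GO);
 *		(void) sleep(1);
 *
 *		if (ioctl(fd, TSTATIOC_READ, buf) == 0) {
 *			for (tp = buf; tp < buf + ncpu; tp++) {
 *				if (tp->tdata_cpuid == -1)
 *					break;
 *				(void) printf("cpu %d\n", tp->tdata_cpuid);
 *			}
 *		}
 *
 *		(void) ioctl(fd, TSTATIOC_STOP);
 *		(void) close(fd);
 *		return (0);
 *	}
 */
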
1937*0Sstevel@tonic-gate /*ARGSUSED*/
1938*0Sstevel@tonic-gate static int
1939*0Sstevel@tonic-gate trapstat_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
1940*0Sstevel@tonic-gate {
1941*0Sstevel@tonic-gate 	int error;
1942*0Sstevel@tonic-gate 
1943*0Sstevel@tonic-gate 	switch (infocmd) {
1944*0Sstevel@tonic-gate 	case DDI_INFO_DEVT2DEVINFO:
1945*0Sstevel@tonic-gate 		*result = (void *)tstat_devi;
1946*0Sstevel@tonic-gate 		error = DDI_SUCCESS;
1947*0Sstevel@tonic-gate 		break;
1948*0Sstevel@tonic-gate 	case DDI_INFO_DEVT2INSTANCE:
1949*0Sstevel@tonic-gate 		*result = (void *)0;
1950*0Sstevel@tonic-gate 		error = DDI_SUCCESS;
1951*0Sstevel@tonic-gate 		break;
1952*0Sstevel@tonic-gate 	default:
1953*0Sstevel@tonic-gate 		error = DDI_FAILURE;
1954*0Sstevel@tonic-gate 	}
1955*0Sstevel@tonic-gate 	return (error);
1956*0Sstevel@tonic-gate }
1957*0Sstevel@tonic-gate 
1958*0Sstevel@tonic-gate static int
1959*0Sstevel@tonic-gate trapstat_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1960*0Sstevel@tonic-gate {
1961*0Sstevel@tonic-gate 	switch (cmd) {
1962*0Sstevel@tonic-gate 	case DDI_ATTACH:
1963*0Sstevel@tonic-gate 		break;
1964*0Sstevel@tonic-gate 
1965*0Sstevel@tonic-gate 	case DDI_RESUME:
1966*0Sstevel@tonic-gate 		return (DDI_SUCCESS);
1967*0Sstevel@tonic-gate 
1968*0Sstevel@tonic-gate 	default:
1969*0Sstevel@tonic-gate 		return (DDI_FAILURE);
1970*0Sstevel@tonic-gate 	}
1971*0Sstevel@tonic-gate 
1972*0Sstevel@tonic-gate 	if (ddi_create_minor_node(devi, "trapstat", S_IFCHR,
1973*0Sstevel@tonic-gate 	    0, DDI_PSEUDO, 0) == DDI_FAILURE) {
1974*0Sstevel@tonic-gate 		ddi_remove_minor_node(devi, NULL);
1975*0Sstevel@tonic-gate 		return (DDI_FAILURE);
1976*0Sstevel@tonic-gate 	}
1977*0Sstevel@tonic-gate 
1978*0Sstevel@tonic-gate 	ddi_report_dev(devi);
1979*0Sstevel@tonic-gate 	tstat_devi = devi;
1980*0Sstevel@tonic-gate 
1981*0Sstevel@tonic-gate 	tstat_pgszs = page_num_pagesizes();
1982*0Sstevel@tonic-gate 	tstat_user_pgszs = page_num_user_pagesizes();
1983*0Sstevel@tonic-gate 	tstat_data_t_size = sizeof (tstat_data_t) +
1984*0Sstevel@tonic-gate 	    (tstat_pgszs - 1) * sizeof (tstat_pgszdata_t);
1985*0Sstevel@tonic-gate 	tstat_data_t_exported_size = sizeof (tstat_data_t) +
1986*0Sstevel@tonic-gate 	    (tstat_user_pgszs - 1) * sizeof (tstat_pgszdata_t);
1987*0Sstevel@tonic-gate #ifndef sun4v
1988*0Sstevel@tonic-gate 	tstat_data_pages = (tstat_data_t_size >> MMU_PAGESHIFT) + 1;
1989*0Sstevel@tonic-gate 	tstat_total_pages = TSTAT_INSTR_PAGES + tstat_data_pages;
1990*0Sstevel@tonic-gate 	tstat_data_size = tstat_data_pages * MMU_PAGESIZE;
1991*0Sstevel@tonic-gate 	tstat_total_size = TSTAT_INSTR_SIZE + tstat_data_size;
1992*0Sstevel@tonic-gate #else
1993*0Sstevel@tonic-gate 	tstat_data_pages = 0;
1994*0Sstevel@tonic-gate 	tstat_data_size = tstat_data_t_size;
1995*0Sstevel@tonic-gate 	tstat_total_pages = ((TSTAT_INSTR_SIZE + tstat_data_size) >>
1996*0Sstevel@tonic-gate 	    MMU_PAGESHIFT) + 1;
1997*0Sstevel@tonic-gate 	tstat_total_size = tstat_total_pages * MMU_PAGESIZE;
1998*0Sstevel@tonic-gate #endif
1999*0Sstevel@tonic-gate 
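	/*
	 * To make the sizing above concrete with hypothetical numbers (the
	 * non-sun4v case, 8K MMU pages):  if tstat_data_t_size worked out
	 * to 10K, tstat_data_pages would be (10K >> 13) + 1 = 2,
	 * tstat_data_size would be 16K, and each per-CPU region would be
	 * TSTAT_INSTR_SIZE plus that 16K.
	 */
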
2000*0Sstevel@tonic-gate 	tstat_percpu = kmem_zalloc((max_cpuid + 1) *
2001*0Sstevel@tonic-gate 	    sizeof (tstat_percpu_t), KM_SLEEP);
2002*0Sstevel@tonic-gate 
2003*0Sstevel@tonic-gate 	/*
2004*0Sstevel@tonic-gate 	 * Create our own arena backed by segkmem to ensure a source of
2005*0Sstevel@tonic-gate 	 * MMU_PAGESIZE-aligned allocations.  We allocate out of the
2006*0Sstevel@tonic-gate 	 * heap32_arena to ensure that we can address the allocated memory with
2007*0Sstevel@tonic-gate 	 * a single sethi/simm13 pair in the interposing trap table entries.
2008*0Sstevel@tonic-gate 	 */
2009*0Sstevel@tonic-gate 	tstat_arena = vmem_create("trapstat", NULL, 0, MMU_PAGESIZE,
2010*0Sstevel@tonic-gate 	    segkmem_alloc_permanent, segkmem_free, heap32_arena, 0, VM_SLEEP);
2011*0Sstevel@tonic-gate 
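	/*
	 * To make the heap32_arena constraint concrete:  a 32-bit address
	 * can be formed and used in an interposing entry with a single
	 * pair of instructions, along the lines of
	 *
	 *	sethi	%hi(addr), %g1		! bits 31..10 of addr
	 *	ldx	[%g1 + %lo(addr)], %g1	! %lo(addr) fits in simm13
	 *
	 * whereas a full 64-bit address would require several additional
	 * instructions.  (The register and load shown here are purely
	 * illustrative.)
	 */
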
2012*0Sstevel@tonic-gate 	tstat_enabled = kmem_alloc(TSTAT_TOTAL_NENT * sizeof (int), KM_SLEEP);
2013*0Sstevel@tonic-gate 	tstat_buffer = kmem_alloc(tstat_data_t_size, KM_SLEEP);
2014*0Sstevel@tonic-gate 
2015*0Sstevel@tonic-gate 	/*
2016*0Sstevel@tonic-gate 	 * CB_CL_CPR_POST_USER is the class that executes from cpr_resume()
2017*0Sstevel@tonic-gate 	 * after user threads can be restarted.  By executing in this class,
2018*0Sstevel@tonic-gate 	 * we are assured of the availability of system services needed to
2019*0Sstevel@tonic-gate 	 * resume trapstat (specifically, we are assured that all CPUs are
2020*0Sstevel@tonic-gate 	 * restarted and responding to cross calls).
2021*0Sstevel@tonic-gate 	 */
2022*0Sstevel@tonic-gate 	tstat_cprcb =
2023*0Sstevel@tonic-gate 	    callb_add(trapstat_cpr, NULL, CB_CL_CPR_POST_USER, "trapstat");
2024*0Sstevel@tonic-gate 
2025*0Sstevel@tonic-gate 	return (DDI_SUCCESS);
2026*0Sstevel@tonic-gate }
2027*0Sstevel@tonic-gate 
2028*0Sstevel@tonic-gate static int
2029*0Sstevel@tonic-gate trapstat_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
2030*0Sstevel@tonic-gate {
2031*0Sstevel@tonic-gate 	int rval;
2032*0Sstevel@tonic-gate 
2033*0Sstevel@tonic-gate 	ASSERT(devi == tstat_devi);
2034*0Sstevel@tonic-gate 
2035*0Sstevel@tonic-gate 	switch (cmd) {
2036*0Sstevel@tonic-gate 	case DDI_DETACH:
2037*0Sstevel@tonic-gate 		break;
2038*0Sstevel@tonic-gate 
2039*0Sstevel@tonic-gate 	case DDI_SUSPEND:
2040*0Sstevel@tonic-gate 		return (DDI_SUCCESS);
2041*0Sstevel@tonic-gate 
2042*0Sstevel@tonic-gate 	default:
2043*0Sstevel@tonic-gate 		return (DDI_FAILURE);
2044*0Sstevel@tonic-gate 	}
2045*0Sstevel@tonic-gate 
2046*0Sstevel@tonic-gate 	ASSERT(!tstat_running);
2047*0Sstevel@tonic-gate 
2048*0Sstevel@tonic-gate 	rval = callb_delete(tstat_cprcb);
2049*0Sstevel@tonic-gate 	ASSERT(rval == 0);
2050*0Sstevel@tonic-gate 
2051*0Sstevel@tonic-gate 	kmem_free(tstat_buffer, tstat_data_t_size);
2052*0Sstevel@tonic-gate 	kmem_free(tstat_enabled, TSTAT_TOTAL_NENT * sizeof (int));
2053*0Sstevel@tonic-gate 	vmem_destroy(tstat_arena);
2054*0Sstevel@tonic-gate 	kmem_free(tstat_percpu, (max_cpuid + 1) * sizeof (tstat_percpu_t));
2055*0Sstevel@tonic-gate 	ddi_remove_minor_node(devi, NULL);
2056*0Sstevel@tonic-gate 
2057*0Sstevel@tonic-gate 	return (DDI_SUCCESS);
2058*0Sstevel@tonic-gate }
2059*0Sstevel@tonic-gate 
2060*0Sstevel@tonic-gate /*
2061*0Sstevel@tonic-gate  * Configuration data structures
2062*0Sstevel@tonic-gate  */
2063*0Sstevel@tonic-gate static struct cb_ops trapstat_cb_ops = {
2064*0Sstevel@tonic-gate 	trapstat_open,		/* open */
2065*0Sstevel@tonic-gate 	trapstat_close,		/* close */
2066*0Sstevel@tonic-gate 	nulldev,		/* strategy */
2067*0Sstevel@tonic-gate 	nulldev,		/* print */
2068*0Sstevel@tonic-gate 	nodev,			/* dump */
2069*0Sstevel@tonic-gate 	nodev,			/* read */
2070*0Sstevel@tonic-gate 	nodev,			/* write */
2071*0Sstevel@tonic-gate 	trapstat_ioctl,		/* ioctl */
2072*0Sstevel@tonic-gate 	nodev,			/* devmap */
2073*0Sstevel@tonic-gate 	nodev,			/* mmap */
2074*0Sstevel@tonic-gate 	nodev,			/* segmap */
2075*0Sstevel@tonic-gate 	nochpoll,		/* poll */
2076*0Sstevel@tonic-gate 	ddi_prop_op,		/* cb_prop_op */
2077*0Sstevel@tonic-gate 	0,			/* streamtab */
2078*0Sstevel@tonic-gate 	D_MP | D_NEW		/* Driver compatibility flag */
2079*0Sstevel@tonic-gate };
2080*0Sstevel@tonic-gate 
2081*0Sstevel@tonic-gate static struct dev_ops trapstat_ops = {
2082*0Sstevel@tonic-gate 	DEVO_REV,		/* devo_rev, */
2083*0Sstevel@tonic-gate 	0,			/* refcnt */
2084*0Sstevel@tonic-gate 	trapstat_info,		/* getinfo */
2085*0Sstevel@tonic-gate 	nulldev,		/* identify */
2086*0Sstevel@tonic-gate 	nulldev,		/* probe */
2087*0Sstevel@tonic-gate 	trapstat_attach,	/* attach */
2088*0Sstevel@tonic-gate 	trapstat_detach,	/* detach */
2089*0Sstevel@tonic-gate 	nulldev,		/* reset */
2090*0Sstevel@tonic-gate 	&trapstat_cb_ops,	/* cb_ops */
2091*0Sstevel@tonic-gate 	(struct bus_ops *)0,	/* bus_ops */
2092*0Sstevel@tonic-gate };
2093*0Sstevel@tonic-gate 
2094*0Sstevel@tonic-gate static struct modldrv modldrv = {
2095*0Sstevel@tonic-gate 	&mod_driverops,		/* Type of module.  This one is a driver */
2096*0Sstevel@tonic-gate 	"Trap Statistics",	/* name of module */
2097*0Sstevel@tonic-gate 	&trapstat_ops,		/* driver ops */
2098*0Sstevel@tonic-gate };
2099*0Sstevel@tonic-gate 
2100*0Sstevel@tonic-gate static struct modlinkage modlinkage = {
2101*0Sstevel@tonic-gate 	MODREV_1, (void *)&modldrv, NULL
2102*0Sstevel@tonic-gate };
2103*0Sstevel@tonic-gate 
2104*0Sstevel@tonic-gate int
2105*0Sstevel@tonic-gate _init(void)
2106*0Sstevel@tonic-gate {
2107*0Sstevel@tonic-gate 	return (mod_install(&modlinkage));
2108*0Sstevel@tonic-gate }
2109*0Sstevel@tonic-gate 
2110*0Sstevel@tonic-gate int
2111*0Sstevel@tonic-gate _fini(void)
2112*0Sstevel@tonic-gate {
2113*0Sstevel@tonic-gate 	return (mod_remove(&modlinkage));
2114*0Sstevel@tonic-gate }
2115*0Sstevel@tonic-gate 
2116*0Sstevel@tonic-gate int
2117*0Sstevel@tonic-gate _info(struct modinfo *modinfop)
2118*0Sstevel@tonic-gate {
2119*0Sstevel@tonic-gate 	return (mod_info(&modlinkage, modinfop));
2120*0Sstevel@tonic-gate }
2121