/* $NetBSD: tprof_armv7.c,v 1.6 2021/11/26 13:24:28 christos Exp $ */

/*-
 * Copyright (c) 2018 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tprof_armv7.c,v 1.6 2021/11/26 13:24:28 christos Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <dev/tprof/tprof.h>

#include <arm/armreg.h>
#include <arm/locore.h>

#include <dev/tprof/tprof_armv7.h>

/* PMCR (Performance Monitors Control Register) fields */
#define	PMCR_N			__BITS(15,11)	/* number of event counters */
#define	PMCR_D			__BIT(3)	/* cycle counter clock divider (1/64) */
#define	PMCR_E			__BIT(0)	/* enable all counters */

/* PMXEVTYPER (selected event type) fields */
#define	PMEVTYPER_P		__BIT(31)	/* do not count in privileged modes */
#define	PMEVTYPER_U		__BIT(30)	/* do not count in user mode */
#define	PMEVTYPER_EVTCOUNT	__BITS(7,0)	/* event number to count */

static tprof_param_t armv7_pmu_param;		/* parameters from the tprof framework */
static const u_int armv7_pmu_counter = 1;	/* event counter used for sampling */
static uint32_t counter_val;			/* events per sample period */
static uint32_t counter_reset_val;		/* preload so the counter overflows each period */

/*
 * Check whether a common event is implemented, using the PMCEID0/PMCEID1
 * ID registers (one bit per event number 0-63).
 */
static bool
armv7_pmu_event_implemented(uint16_t event)
{
	uint32_t eid[2];

	if (event >= 64)
		return false;

	eid[0] = armreg_pmceid0_read();
	eid[1] = armreg_pmceid1_read();

	/* PMCEID0 covers events 0-31, PMCEID1 covers events 32-63 */
	const u_int idx = event / 32;
	const u_int bit = event % 32;

	if (eid[idx] & __BIT(bit))
		return true;

	return false;
}
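
/*
 * Example of the lookup above (illustrative values, not from any particular
 * CPU): the ARMv7 common event 0x11 (CPU_CYCLES) has idx = 0x11 / 32 = 0 and
 * bit = 0x11 % 32 = 17, so it is reported as implemented when bit 17 of
 * PMCEID0 is set.
 */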

/*
 * The ARMv7 PMU exposes the event counters indirectly: write the counter
 * number to PMSELR, then access the selected counter through PMXEVTYPER
 * or PMXEVCNTR.  The isb() orders the select before the following access.
 */
static void
armv7_pmu_set_pmevtyper(u_int counter, uint64_t val)
{
	armreg_pmselr_write(counter);
	isb();
	armreg_pmxevtyper_write(val);
}

static void
armv7_pmu_set_pmevcntr(u_int counter, uint32_t val)
{
	armreg_pmselr_write(counter);
	isb();
	armreg_pmxevcntr_write(val);
}
static void
armv7_pmu_start_cpu(void *arg1, void *arg2)
{
	const uint32_t counter_mask = __BIT(armv7_pmu_counter);
	uint64_t pmcr, pmevtyper;

	/* Enable performance monitor */
	pmcr = armreg_pmcr_read();
	pmcr |= PMCR_E;
	armreg_pmcr_write(pmcr);

	/* Disable event counter */
	armreg_pmcntenclr_write(counter_mask);

	/* Configure event counter */
	pmevtyper = __SHIFTIN(armv7_pmu_param.p_event, PMEVTYPER_EVTCOUNT);
	if (!ISSET(armv7_pmu_param.p_flags, TPROF_PARAM_USER))
		pmevtyper |= PMEVTYPER_U;
	if (!ISSET(armv7_pmu_param.p_flags, TPROF_PARAM_KERN))
		pmevtyper |= PMEVTYPER_P;

	armv7_pmu_set_pmevtyper(armv7_pmu_counter, pmevtyper);

	/* Enable overflow interrupts */
	armreg_pmintenset_write(counter_mask);

	/* Clear overflow flag */
	armreg_pmovsr_write(counter_mask);

	/* Initialize event counter value */
	armv7_pmu_set_pmevcntr(armv7_pmu_counter, counter_reset_val);

	/* Enable event counter */
	armreg_pmcntenset_write(counter_mask);
}

static void
armv7_pmu_stop_cpu(void *arg1, void *arg2)
{
	const uint32_t counter_mask = __BIT(armv7_pmu_counter);
	uint32_t pmcr;

	/* Disable overflow interrupts */
	armreg_pmintenclr_write(counter_mask);

	/* Disable event counter */
	armreg_pmcntenclr_write(counter_mask);

	/* Disable performance monitor */
	pmcr = armreg_pmcr_read();
	pmcr &= ~PMCR_E;
	armreg_pmcr_write(pmcr);
}

static uint64_t
armv7_pmu_estimate_freq(void)
{
	uint64_t cpufreq = curcpu()->ci_data.cpu_cc_freq;
	uint64_t freq = 10000;	/* target sample rate, in Hz */
	uint32_t pmcr;

	counter_val = cpufreq / freq;
	if (counter_val == 0) {
		/* cpu_cc_freq is not known; assume a 4 GHz clock */
		counter_val = 4000000000ULL / freq;
	}

	/* PMCR.D makes the cycle counter tick once every 64 cycles */
	pmcr = armreg_pmcr_read();
	if (pmcr & PMCR_D)
		counter_val /= 64;

	return freq;
}
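
/*
 * Worked example of the estimate above (illustrative numbers only): with
 * cpu_cc_freq = 1 GHz and the 10000 Hz target rate, counter_val becomes
 * 100000, i.e. the sampling counter is armed to overflow roughly every
 * 100000 events (every 100 us when counting cycles).
 */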

static uint32_t
armv7_pmu_ident(void)
{
	return TPROF_IDENT_ARMV7_GENERIC;
}

static int
armv7_pmu_start(const tprof_param_t *param)
{
	/* PMCR.N of 0 means that no event counters are available */
	if (__SHIFTOUT(armreg_pmcr_read(), PMCR_N) == 0) {
		return EINVAL;
	}

	if (!armv7_pmu_event_implemented(param->p_event)) {
		printf("%s: event %#" PRIx64 " not implemented on this CPU\n",
		    __func__, param->p_event);
		return EINVAL;
	}

	/* Preload value: the counter overflows after ~counter_val events */
	counter_reset_val = -counter_val + 1;

	armv7_pmu_param = *param;
	uint64_t xc = xc_broadcast(0, armv7_pmu_start_cpu, NULL, NULL);
	xc_wait(xc);

	return 0;
}
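
/*
 * For illustration only (hypothetical values): the tprof framework might
 * hand this backend a parameter block such as
 *
 *	tprof_param_t param = {
 *		.p_event = 0x11,	(CPU_CYCLES)
 *		.p_flags = TPROF_PARAM_USER | TPROF_PARAM_KERN,
 *	};
 *
 * which samples both user and kernel execution on the cycle-count event.
 */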

static void
armv7_pmu_stop(const tprof_param_t *param)
{
	uint64_t xc;

	xc = xc_broadcast(0, armv7_pmu_stop_cpu, NULL, NULL);
	xc_wait(xc);
}

static const tprof_backend_ops_t tprof_armv7_pmu_ops = {
	.tbo_estimate_freq = armv7_pmu_estimate_freq,
	.tbo_ident = armv7_pmu_ident,
	.tbo_start = armv7_pmu_start,
	.tbo_stop = armv7_pmu_stop,
};

/*
 * Counter overflow interrupt: record a sample for the interrupted PC,
 * rearm the event counter and acknowledge the overflow.
 */
int
armv7_pmu_intr(void *priv)
{
	const struct trapframe * const tf = priv;
	const uint32_t counter_mask = __BIT(armv7_pmu_counter);
	tprof_frame_info_t tfi;

	const uint32_t pmovsr = armreg_pmovsr_read();
	if ((pmovsr & counter_mask) != 0) {
		tfi.tfi_pc = tf->tf_pc;
		tfi.tfi_inkernel = tfi.tfi_pc >= VM_MIN_KERNEL_ADDRESS &&
		    tfi.tfi_pc < VM_MAX_KERNEL_ADDRESS;
		tprof_sample(NULL, &tfi);

		armv7_pmu_set_pmevcntr(armv7_pmu_counter, counter_reset_val);
	}
	/* Writing the flags back to PMOVSR clears the overflow status */
	armreg_pmovsr_write(pmovsr);

	return 1;
}

int
armv7_pmu_init(void)
{
	/* Disable user mode access to performance monitors */
	armreg_pmuserenr_write(0);

	/* Disable interrupts */
	armreg_pmintenclr_write(~0U);

	/* Disable counters */
	armreg_pmcntenclr_write(~0U);

	/* Disable performance monitor */
	armreg_pmcr_write(0);

	return tprof_backend_register("tprof_armv7", &tprof_armv7_pmu_ops,
	    TPROF_BACKEND_VERSION);
}
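
/*
 * Usage sketch (not part of this file): platform attachment code is expected
 * to register the backend and route the PMU overflow interrupt to
 * armv7_pmu_intr().  The interrupt-establishment call below is a placeholder
 * for whatever mechanism the platform provides (e.g. an FDT- or
 * board-specific intr_establish variant); only armv7_pmu_init() and
 * armv7_pmu_intr() come from this file.
 *
 *	void
 *	example_pmu_attach(void)	// hypothetical platform hook
 *	{
 *		platform_intr_establish(PMU_IRQ, IPL_HIGH,
 *		    armv7_pmu_intr);	// placeholder API
 *		armv7_pmu_init();
 *	}
 */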