/* $NetBSD: pmu_fdt.c,v 1.12 2023/10/02 08:42:20 riastradh Exp $ */

/*-
 * Copyright (c) 2018 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmu_fdt.c,v 1.12 2023/10/02 08:42:20 riastradh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/interrupt.h>
#include <sys/kmem.h>
#include <sys/xcall.h>

#include <dev/fdt/fdtvar.h>

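/*
 * Hook the generic arm_pmu_* entry points up to the tprof(4) PMU backend
 * for the architecture being built: the ARMv8 PMU on aarch64, otherwise
 * the ARMv7 PMU.
 */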
#if defined(__aarch64__)
#include <dev/tprof/tprof_armv8.h>
#define arm_pmu_intr	armv8_pmu_intr
#define arm_pmu_init	armv8_pmu_init
#elif defined(_ARM_ARCH_7)
#include <dev/tprof/tprof_armv7.h>
#define arm_pmu_intr	armv7_pmu_intr
#define arm_pmu_init	armv7_pmu_init
#endif

#include <arm/armreg.h>

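/*
 * State shared across all PMU instances: pmu_fdt_uses_ppi records whether
 * the first instance attached with a single PPI (one handler covering all
 * CPUs), and pmu_fdt_count tracks how many instances have finished
 * initialization so the event counter backend is only set up once.
 */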
static bool pmu_fdt_uses_ppi;
static int pmu_fdt_count;

static int	pmu_fdt_match(device_t, cfdata_t, void *);
static void	pmu_fdt_attach(device_t, device_t, void *);

static void	pmu_fdt_init(device_t);
static int	pmu_fdt_intr_distribute(const int, int, void *);

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "arm,armv8-pmuv3" },
	{ .compat = "arm,cortex-a73-pmu" },
	{ .compat = "arm,cortex-a72-pmu" },
	{ .compat = "arm,cortex-a57-pmu" },
	{ .compat = "arm,cortex-a53-pmu" },

	{ .compat = "arm,cortex-a35-pmu" },
	{ .compat = "arm,cortex-a17-pmu" },
	{ .compat = "arm,cortex-a12-pmu" },
	{ .compat = "arm,cortex-a9-pmu" },
	{ .compat = "arm,cortex-a8-pmu" },
	{ .compat = "arm,cortex-a7-pmu" },
	{ .compat = "arm,cortex-a5-pmu" },

	DEVICE_COMPAT_EOL
};

struct pmu_fdt_softc {
	device_t	sc_dev;
	int		sc_phandle;
};

CFATTACH_DECL_NEW(pmu_fdt, sizeof(struct pmu_fdt_softc),
	pmu_fdt_match, pmu_fdt_attach, NULL, NULL);

static int
pmu_fdt_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

static void
pmu_fdt_attach(device_t parent, device_t self, void *aux)
{
	struct pmu_fdt_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;

	aprint_naive("\n");
	aprint_normal(": Performance Monitor Unit\n");

	sc->sc_dev = self;
	sc->sc_phandle = phandle;

	config_interrupts(self, pmu_fdt_init);
}

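/*
 * Deferred initialization, run via config_interrupts(9) once interrupt
 * controllers are available: initialize the PMU event counters on the
 * first instance, then establish either a single PPI handler or one SPI
 * handler per CPU and distribute the SPIs across CPUs.
 */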
static void
pmu_fdt_init(device_t self)
{
	struct pmu_fdt_softc * const sc = device_private(self);
	const int phandle = sc->sc_phandle;
	char intrstr[128];
	int error, n;
	void **ih;

	if (pmu_fdt_uses_ppi && pmu_fdt_count > 0) {
		/*
		 * Second instance of a PMU where PPIs are used. Since the PMU
		 * is already initialized and the PPI interrupt handler has
		 * already been installed, there is nothing left to do here.
		 */
		if (fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr)))
			aprint_normal_dev(self, "interrupting on %s\n", intrstr);
		return;
	}

	if (pmu_fdt_count == 0) {
		error = arm_pmu_init();
		if (error) {
			aprint_error_dev(self,
			    "couldn't initialise PMU event counter\n");
			return;
		}
	}

	ih = kmem_zalloc(sizeof(void *) * ncpu, KM_SLEEP);

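	/*
	 * Try to establish one interrupt per CPU.  A PMU wired with a
	 * single PPI only has interrupt index 0, so the loop stops early
	 * and nirq below ends up as 1.
	 */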
	for (n = 0; n < ncpu; n++) {
		ih[n] = fdtbus_intr_establish_xname(phandle, n, IPL_HIGH,
		    FDT_INTR_MPSAFE, arm_pmu_intr, NULL, device_xname(self));
		if (ih[n] == NULL)
			break;
		if (!fdtbus_intr_str(phandle, n, intrstr, sizeof(intrstr))) {
			aprint_error_dev(self,
			    "couldn't decode interrupt %u\n", n);
			goto cleanup;
		}
		aprint_normal_dev(self, "interrupting on %s\n", intrstr);
	}

	/* We need either one IRQ (PPI), or one per CPU (SPI) */
	const int nirq = n;
	if (nirq == 0) {
		aprint_error_dev(self, "couldn't establish interrupts\n");
		goto cleanup;
	}

	/* Set interrupt affinity if we have more than one interrupt */
	if (nirq > 1) {
		for (n = 0; n < nirq; n++) {
			error = pmu_fdt_intr_distribute(phandle, n, ih[n]);
			if (error != 0) {
				aprint_error_dev(self,
				    "failed to distribute interrupt %u: %d\n",
				    n, error);
				goto cleanup;
			}
		}
	}

	pmu_fdt_count++;
	pmu_fdt_uses_ppi = nirq == 1 && ncpu > 1;

cleanup:
	kmem_free(ih, sizeof(void *) * ncpu);
}

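/*
 * Bind one per-CPU SPI to its CPU.  If the optional "interrupt-affinity"
 * property is present it lists a CPU node phandle per interrupt; the
 * referenced node's MPIDR (its "reg" value) is matched against each
 * cpu_info.  Without the property, interrupt N is assumed to target the
 * CPU with index N.
 */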
static int
pmu_fdt_intr_distribute(const int phandle, int index, void *ih)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	bus_addr_t mpidr;
	int len, cpunode;
	const u_int *aff;
	kcpuset_t *set;
	int error;

	kcpuset_create(&set, true);

	if (of_hasprop(phandle, "interrupt-affinity")) {
		aff = fdtbus_get_prop(phandle, "interrupt-affinity", &len);
		if (len < (index + 1) * 4) {
			kcpuset_destroy(set);
			return EINVAL;
		}
		cpunode = fdtbus_get_phandle_from_native(be32toh(aff[index]));
		if (fdtbus_get_reg(cpunode, 0, &mpidr, NULL) != 0) {
			kcpuset_destroy(set);
			return ENXIO;
		}
		for (CPU_INFO_FOREACH(cii, ci)) {
			const uint32_t ci_mpidr =
			    __SHIFTIN(ci->ci_core_id, MPIDR_AFF0) |
			    __SHIFTIN(ci->ci_package_id, MPIDR_AFF1);
			if (ci_mpidr == mpidr) {
				kcpuset_set(set, cpu_index(ci));
				break;
			}
		}
	} else {
		kcpuset_set(set, index);
	}

	if (kcpuset_iszero(set)) {
		kcpuset_destroy(set);
		return ENOENT;
	}

	error = interrupt_distribute(ih, set, NULL);

	kcpuset_destroy(set);

	return error;
}