/* $NetBSD: cpu_acpi.c,v 1.17 2024/12/30 19:17:21 jmcneill Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill@invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "tprof.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu_acpi.c,v 1.17 2024/12/30 19:17:21 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/interrupt.h>
#include <sys/kcpuset.h>
#include <sys/kmem.h>
#include <sys/reboot.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_srat.h>
#include <external/bsd/acpica/dist/include/amlresrc.h>

#include <arm/armreg.h>
#include <arm/cpu.h>
#include <arm/cpufunc.h>
#include <arm/cpuvar.h>
#include <arm/locore.h>

#include <arm/arm/psci.h>

#define LPI_IDLE_FACTOR	3

#if NTPROF > 0
#include <dev/tprof/tprof_armv8.h>
#endif

static int	cpu_acpi_match(device_t, cfdata_t, void *);
static void	cpu_acpi_attach(device_t, device_t, void *);

static void	cpu_acpi_probe_lpi(device_t, struct cpu_info *ci);
void		cpu_acpi_lpi_idle(void);

#if NTPROF > 0
static void	cpu_acpi_tprof_init(device_t);
#endif

CFATTACH_DECL2_NEW(cpu_acpi, 0,
    cpu_acpi_match, cpu_acpi_attach, NULL, NULL,
    cpu_rescan, cpu_childdetached);

#ifdef MULTIPROCESSOR
static register_t
cpu_acpi_mpstart_pa(void)
{

	return (register_t)KERN_VTOPHYS((vaddr_t)cpu_mpstart);
}
#endif /* MULTIPROCESSOR */

static int
cpu_acpi_match(device_t parent, cfdata_t cf, void *aux)
{
	ACPI_SUBTABLE_HEADER *hdrp = aux;
	ACPI_MADT_GENERIC_INTERRUPT *gicc;

	if (hdrp->Type != ACPI_MADT_TYPE_GENERIC_INTERRUPT)
		return 0;

	gicc = (ACPI_MADT_GENERIC_INTERRUPT *)hdrp;

	return (gicc->Flags & ACPI_MADT_ENABLED) != 0;
}
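
/*
 * Attach a processor described by a MADT GICC subtable.  On
 * MULTIPROCESSOR kernels this starts secondary CPUs via PSCI and
 * spins until they hatch; it then records the ACPI Processor UID
 * and any SRAT NUMA node before handing off to the MI cpu_attach().
 */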
static void
cpu_acpi_attach(device_t parent, device_t self, void *aux)
{
	prop_dictionary_t dict = device_properties(self);
	ACPI_MADT_GENERIC_INTERRUPT *gicc = aux;
	const uint64_t mpidr = gicc->ArmMpidr;
	const int unit = device_unit(self);
	struct cpu_info *ci = &cpu_info_store[unit];
	struct acpisrat_node *node;

#ifdef MULTIPROCESSOR
	if (cpu_mpidr_aff_read() != mpidr && (boothowto & RB_MD1) == 0) {
		const u_int cpuindex = device_unit(self);
		int error;

		cpu_mpidr[cpuindex] = mpidr;
		cpu_dcache_wb_range((vaddr_t)&cpu_mpidr[cpuindex],
		    sizeof(cpu_mpidr[cpuindex]));

		/* XXX support spin table */
		error = psci_cpu_on(mpidr, cpu_acpi_mpstart_pa(), 0);
		if (error != PSCI_SUCCESS) {
			aprint_error_dev(self, "failed to start CPU\n");
			return;
		}

		sev();

		for (u_int i = 0x10000000; i > 0; i--) {
			if (cpu_hatched_p(cpuindex))
				break;
		}
	}
#endif /* MULTIPROCESSOR */

	/* Assume that less efficient processors are faster. */
	prop_dictionary_set_uint32(dict, "capacity_dmips_mhz",
	    gicc->EfficiencyClass);

	/* Store the ACPI Processor UID in cpu_info */
	ci->ci_acpiid = gicc->Uid;

	/* Scan SRAT for NUMA info. */
	if (cpu_mpidr_aff_read() == mpidr) {
		acpisrat_init();
	}
	node = acpisrat_get_node(gicc->Uid);
	if (node != NULL) {
		ci->ci_numa_id = node->nodeid;
	}

	/* Attach the CPU */
	cpu_attach(self, mpidr);

	/* Probe for low-power idle states. */
	cpu_acpi_probe_lpi(self, ci);

#if NTPROF > 0
	if (cpu_mpidr_aff_read() == mpidr && armv8_pmu_detect())
		config_interrupts(self, cpu_acpi_tprof_init);
#endif
}

static void
cpu_acpi_probe_lpi(device_t dev, struct cpu_info *ci)
{
	ACPI_HANDLE hdl;
	ACPI_BUFFER buf;
	ACPI_OBJECT *obj, *lpi;
	ACPI_STATUS rv;
	uint32_t levelid;
	uint32_t numlpi;
	uint32_t n;
	int enable_lpi;

	if (get_bootconf_option(boot_args, "nolpi",
	    BOOTOPT_TYPE_BOOLEAN, &enable_lpi) &&
	    !enable_lpi) {
		return;
	}

	hdl = acpi_match_cpu_info(ci);
	if (hdl == NULL) {
		return;
	}
	rv = AcpiGetHandle(hdl, "_LPI", &hdl);
	if (ACPI_FAILURE(rv)) {
		return;
	}
	rv = acpi_eval_struct(hdl, NULL, &buf);
	if (ACPI_FAILURE(rv)) {
		return;
	}
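
	/*
	 * _LPI (ACPI 6.x) evaluates to a package of the form
	 * { Revision, LevelID, Count, LPIState0, LPIState1, ... }.
	 * Each LPI state is itself a package; the elements used here
	 * are [0] minimum residency (us), [1] worst case wakeup
	 * latency (us), [2] flags (bit 0 = enabled), [3] architectural
	 * context loss flags, [6] the entry method (either an integer
	 * PSCI suspend parameter or a buffer holding a Generic
	 * Register descriptor), and [9] an optional state name string.
	 */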
	obj = buf.Pointer;
	if (obj->Type != ACPI_TYPE_PACKAGE ||
	    obj->Package.Count < 3 ||
	    obj->Package.Elements[1].Type != ACPI_TYPE_INTEGER ||
	    obj->Package.Elements[2].Type != ACPI_TYPE_INTEGER) {
		goto out;
	}
	levelid = obj->Package.Elements[1].Integer.Value;
	if (levelid != 0) {
		/* We depend on platform coordination for now. */
		goto out;
	}
	numlpi = obj->Package.Elements[2].Integer.Value;
	if (obj->Package.Count < 3 + numlpi || numlpi == 0) {
		goto out;
	}
	ci->ci_lpi = kmem_zalloc(sizeof(*ci->ci_lpi) * numlpi, KM_SLEEP);
	for (n = 0; n < numlpi; n++) {
		lpi = &obj->Package.Elements[3 + n];
		if (lpi->Type != ACPI_TYPE_PACKAGE ||
		    lpi->Package.Count < 10 ||
		    lpi->Package.Elements[0].Type != ACPI_TYPE_INTEGER ||
		    lpi->Package.Elements[1].Type != ACPI_TYPE_INTEGER ||
		    lpi->Package.Elements[2].Type != ACPI_TYPE_INTEGER ||
		    lpi->Package.Elements[3].Type != ACPI_TYPE_INTEGER ||
		    !(lpi->Package.Elements[6].Type == ACPI_TYPE_BUFFER ||
		      lpi->Package.Elements[6].Type == ACPI_TYPE_INTEGER)) {
			continue;
		}

		if ((lpi->Package.Elements[2].Integer.Value & 1) == 0) {
			/* LPI state is not enabled */
			continue;
		}

		ci->ci_lpi[ci->ci_nlpi].min_res =
		    lpi->Package.Elements[0].Integer.Value;
		ci->ci_lpi[ci->ci_nlpi].wakeup_latency =
		    lpi->Package.Elements[1].Integer.Value;
		ci->ci_lpi[ci->ci_nlpi].save_restore_flags =
		    lpi->Package.Elements[3].Integer.Value;
		if (ci->ci_lpi[ci->ci_nlpi].save_restore_flags != 0) {
			/* Not implemented yet */
			continue;
		}
		if (lpi->Package.Elements[6].Type == ACPI_TYPE_INTEGER) {
			ci->ci_lpi[ci->ci_nlpi].reg_addr =
			    lpi->Package.Elements[6].Integer.Value;
		} else {
			ACPI_GENERIC_ADDRESS addr;

			KASSERT(lpi->Package.Elements[6].Type ==
			    ACPI_TYPE_BUFFER);

			if (lpi->Package.Elements[6].Buffer.Length <
			    sizeof(AML_RESOURCE_GENERIC_REGISTER)) {
				continue;
			}
			memcpy(&addr, lpi->Package.Elements[6].Buffer.Pointer +
			    sizeof(AML_RESOURCE_LARGE_HEADER), sizeof(addr));
			ci->ci_lpi[ci->ci_nlpi].reg_addr = addr.Address;
		}

		if (lpi->Package.Elements[9].Type == ACPI_TYPE_STRING) {
			ci->ci_lpi[ci->ci_nlpi].name =
			    kmem_asprintf("LPI state %s",
			    lpi->Package.Elements[9].String.Pointer);
		} else {
			ci->ci_lpi[ci->ci_nlpi].name =
			    kmem_asprintf("LPI state %u", n + 1);
		}

		aprint_verbose_dev(ci->ci_dev,
		    "%s: min res %u, wakeup latency %u, flags %#x, "
		    "register %#x\n",
		    ci->ci_lpi[ci->ci_nlpi].name,
		    ci->ci_lpi[ci->ci_nlpi].min_res,
		    ci->ci_lpi[ci->ci_nlpi].wakeup_latency,
		    ci->ci_lpi[ci->ci_nlpi].save_restore_flags,
		    ci->ci_lpi[ci->ci_nlpi].reg_addr);

		evcnt_attach_dynamic(&ci->ci_lpi[ci->ci_nlpi].events,
		    EVCNT_TYPE_MISC, NULL, ci->ci_cpuname,
		    ci->ci_lpi[ci->ci_nlpi].name);

		ci->ci_nlpi++;
	}

	if (ci->ci_nlpi > 0) {
		extern void (*arm_cpu_idle)(void);
		arm_cpu_idle = cpu_acpi_lpi_idle;
	}

out:
	ACPI_FREE(buf.Pointer);
}
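
/*
 * Enter an idle state: LPI_REG_ADDR_WFI selects a plain WFI, anything
 * else is passed to PSCI CPU_SUSPEND.  cpu_acpi_lpi_idle() picks the
 * deepest state whose minimum residency, scaled by LPI_IDLE_FACTOR,
 * was covered by the previous idle period, falling back to WFI when
 * no state qualifies.
 */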
static inline void
cpu_acpi_idle(uint32_t addr)
{
	if (addr == LPI_REG_ADDR_WFI) {
		asm volatile("dsb sy; wfi");
	} else {
		psci_cpu_suspend(addr);
	}
}

void
cpu_acpi_lpi_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct timeval start, end;
	int n;

	DISABLE_INTERRUPT();

	microuptime(&start);
	for (n = ci->ci_nlpi - 1; n >= 0; n--) {
		if (ci->ci_last_idle >
		    LPI_IDLE_FACTOR * ci->ci_lpi[n].min_res) {
			cpu_acpi_idle(ci->ci_lpi[n].reg_addr);
			ci->ci_lpi[n].events.ev_count++;
			break;
		}
	}
	if (n == -1) {
		/* Nothing in _LPI, let's just WFI. */
		cpu_acpi_idle(LPI_REG_ADDR_WFI);
	}
	microuptime(&end);
	timersub(&end, &start, &end);

	ci->ci_last_idle = end.tv_sec * 1000000 + end.tv_usec;

	ENABLE_INTERRUPT();
}

#if NTPROF > 0
static struct cpu_info *
cpu_acpi_find_processor(UINT32 uid)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_acpiid == uid)
			return ci;
	}

	return NULL;
}

static ACPI_STATUS
cpu_acpi_tprof_intr_establish(ACPI_SUBTABLE_HEADER *hdrp, void *aux)
{
	device_t dev = aux;
	ACPI_MADT_GENERIC_INTERRUPT *gicc;
	struct cpu_info *ci;
	char xname[16];
	kcpuset_t *set;
	int error;
	void *ih;

	if (hdrp->Type != ACPI_MADT_TYPE_GENERIC_INTERRUPT)
		return AE_OK;

	gicc = (ACPI_MADT_GENERIC_INTERRUPT *)hdrp;
	if ((gicc->Flags & ACPI_MADT_ENABLED) == 0)
		return AE_OK;

	const bool cpu_primary_p = cpu_info_store[0].ci_cpuid == gicc->ArmMpidr;
	const bool intr_ppi_p = gicc->PerformanceInterrupt < 32;
	const int type = (gicc->Flags & ACPI_MADT_PERFORMANCE_IRQ_MODE) ?
	    IST_EDGE : IST_LEVEL;

	if (intr_ppi_p && !cpu_primary_p)
		return AE_OK;

	ci = cpu_acpi_find_processor(gicc->Uid);
	if (ci == NULL) {
		aprint_error_dev(dev, "couldn't find processor %#x\n",
		    gicc->Uid);
		return AE_OK;
	}

	if (intr_ppi_p) {
		strlcpy(xname, "pmu", sizeof(xname));
	} else {
		snprintf(xname, sizeof(xname), "pmu %s", cpu_name(ci));
	}

	ih = intr_establish_xname(gicc->PerformanceInterrupt, IPL_HIGH,
	    type | IST_MPSAFE, armv8_pmu_intr, NULL, xname);
	if (ih == NULL) {
		aprint_error_dev(dev, "couldn't establish %s interrupt\n",
		    xname);
		return AE_OK;
	}

	if (!intr_ppi_p) {
		kcpuset_create(&set, true);
		kcpuset_set(set, cpu_index(ci));
		error = interrupt_distribute(ih, set, NULL);
		kcpuset_destroy(set);

		if (error) {
			aprint_error_dev(dev,
			    "failed to distribute %s interrupt: %d\n",
			    xname, error);
			return AE_OK;
		}
	}

	aprint_normal("%s: PMU interrupting on irq %d\n", cpu_name(ci),
	    gicc->PerformanceInterrupt);

	return AE_OK;
}

static void
cpu_acpi_tprof_init(device_t self)
{
	int err = armv8_pmu_init();
	if (err) {
		aprint_error_dev(self,
		    "failed to initialize PMU event counter\n");
		return;
	}

	if (acpi_madt_map() != AE_OK) {
		aprint_error_dev(self,
		    "failed to map MADT, performance counters not available\n");
		return;
	}
	acpi_madt_walk(cpu_acpi_tprof_intr_establish, self);
	acpi_madt_unmap();
}
#endif