1 /* $NetBSD: acpi_cpu_pstate.c,v 1.54 2020/12/07 10:57:41 jmcneill Exp $ */
2
3 /*-
4 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.54 2020/12/07 10:57:41 jmcneill Exp $");
31
32 #include <sys/param.h>
33 #include <sys/cpufreq.h>
34 #include <sys/cpu.h>
35 #include <sys/kmem.h>
36
37 #include <dev/acpi/acpireg.h>
38 #include <dev/acpi/acpivar.h>
39 #include <dev/acpi/acpi_cpu.h>
40
41 #define _COMPONENT ACPI_BUS_COMPONENT
42 ACPI_MODULE_NAME ("acpi_cpu_pstate")
43
44 static ACPI_STATUS acpicpu_pstate_pss(struct acpicpu_softc *);
45 static ACPI_STATUS acpicpu_pstate_pss_add(struct acpicpu_pstate *,
46 ACPI_OBJECT *);
47 static ACPI_STATUS acpicpu_pstate_xpss(struct acpicpu_softc *);
48 static ACPI_STATUS acpicpu_pstate_xpss_add(struct acpicpu_pstate *,
49 ACPI_OBJECT *);
50 static ACPI_STATUS acpicpu_pstate_pct(struct acpicpu_softc *);
51 static ACPI_STATUS acpicpu_pstate_dep(struct acpicpu_softc *);
52 static int acpicpu_pstate_max(struct acpicpu_softc *);
53 static int acpicpu_pstate_min(struct acpicpu_softc *);
54 static void acpicpu_pstate_change(struct acpicpu_softc *);
55 static void acpicpu_pstate_reset(struct acpicpu_softc *);
56 static void acpicpu_pstate_bios(void);
57
58 extern struct acpicpu_softc **acpicpu_sc;
59
/*
 * Evaluate the ACPI P-state objects (_PSS, XPSS, _PCT, _PSD),
 * perform the MD initialization, and mark P-state support by
 * setting ACPICPU_FLAG_P in the softc on success.
 */
void
acpicpu_pstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	const char *str;
	ACPI_HANDLE tmp;
	ACPI_STATUS rv;

	/* _PSS is mandatory; without it there is nothing to do. */
	rv = acpicpu_pstate_pss(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PSS";
		goto fail;
	}

	/*
	 * Append additional information from the extended _PSS,
	 * if available. Note that XPSS can not be used on Intel
	 * systems that use either _PDC or _OSC. From the XPSS
	 * method specification:
	 *
	 *   "The platform must not require the use of the
	 *    optional _PDC or _OSC methods to coordinate
	 *    between the operating system and firmware for
	 *    the purposes of enabling specific processor
	 *    power management features or implementations."
	 */
	if (sc->sc_cap == 0) {

		rv = acpicpu_pstate_xpss(sc);

		if (ACPI_SUCCESS(rv))
			sc->sc_flags |= ACPICPU_FLAG_P_XPSS;
	}

	/* The control and status registers come from _PCT. */
	rv = acpicpu_pstate_pct(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PCT";
		goto fail;
	}

	/*
	 * The ACPI 3.0 and 4.0 specifications mandate three
	 * objects for P-states: _PSS, _PCT, and _PPC. A less
	 * strict wording is however used in the earlier 2.0
	 * standard, and some systems conforming to ACPI 2.0
	 * do not have _PPC, the method for dynamic maximum.
	 */
	rv = AcpiGetHandle(sc->sc_node->ad_handle, "_PPC", &tmp);

	if (ACPI_FAILURE(rv))
		aprint_debug_dev(self, "_PPC missing\n");

	/*
	 * Carry out MD initialization.
	 */
	rv = acpicpu_md_pstate_init(sc);

	if (rv != 0) {
		rv = AE_SUPPORT;
		goto fail;
	}

	/*
	 * Query the optional _PSD.
	 */
	rv = acpicpu_pstate_dep(sc);

	if (ACPI_SUCCESS(rv))
		sc->sc_flags |= ACPICPU_FLAG_P_DEP;

	/* Zero means "no cached frequency" elsewhere in this file. */
	sc->sc_pstate_current = 0;
	sc->sc_flags |= ACPICPU_FLAG_P;

	acpicpu_pstate_bios();
	acpicpu_pstate_reset(sc);

	return;

fail:
	switch (rv) {

	case AE_NOT_FOUND:
		/* Missing objects are normal; stay silent. */
		return;

	case AE_SUPPORT:
		aprint_verbose_dev(self, "P-states not supported\n");
		return;

	default:
		aprint_error_dev(self, "failed to evaluate "
		    "%s: %s\n", str, AcpiFormatException(rv));
	}
}
155
156 void
acpicpu_pstate_detach(device_t self)157 acpicpu_pstate_detach(device_t self)
158 {
159 struct acpicpu_softc *sc = device_private(self);
160 size_t size;
161
162 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
163 return;
164
165 (void)acpicpu_md_pstate_stop();
166
167 size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);
168
169 if (sc->sc_pstate != NULL)
170 kmem_free(sc->sc_pstate, size);
171
172 sc->sc_flags &= ~ACPICPU_FLAG_P;
173 }
174
175 void
acpicpu_pstate_start(device_t self)176 acpicpu_pstate_start(device_t self)
177 {
178 struct acpicpu_softc *sc = device_private(self);
179
180 if (acpicpu_md_pstate_start(sc) == 0)
181 return;
182
183 sc->sc_flags &= ~ACPICPU_FLAG_P;
184 aprint_error_dev(self, "failed to start P-states\n");
185 }
186
187 void
acpicpu_pstate_suspend(void * aux)188 acpicpu_pstate_suspend(void *aux)
189 {
190 struct acpicpu_softc *sc;
191 device_t self = aux;
192
193 /*
194 * Reset any dynamic limits.
195 */
196 sc = device_private(self);
197 mutex_enter(&sc->sc_mtx);
198 acpicpu_pstate_reset(sc);
199 mutex_exit(&sc->sc_mtx);
200 }
201
/*
 * Resume hook. Intentionally empty; nothing needs to be
 * redone here on wakeup.
 */
void
acpicpu_pstate_resume(void *aux)
{
	/* Nothing. */
}
207
/*
 * Notify callback: re-evaluate the dynamic limits (via
 * acpicpu_pstate_change()) and adjust the frequency with
 * cpufreq(9). The frequency in use before a limit was
 * imposed is remembered in sc_pstate_saved and restored
 * once the limit permits it again.
 */
void
acpicpu_pstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;
	uint32_t freq;

	sc = device_private(self);
	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_change(sc);

	/* Frequency of the (possibly new) highest permitted state. */
	freq = sc->sc_pstate[sc->sc_pstate_max].ps_freq;

	/* Remember the frequency that was current before the limit. */
	if (sc->sc_pstate_saved == 0)
		sc->sc_pstate_saved = sc->sc_pstate_current;

	/*
	 * If the saved frequency is again within the permitted
	 * maximum, restore it and clear the saved state.
	 */
	if (sc->sc_pstate_saved <= freq) {
		freq = sc->sc_pstate_saved;
		sc->sc_pstate_saved = 0;
	}

	/* cpufreq_set() is called without the softc mutex held. */
	mutex_exit(&sc->sc_mtx);
	cpufreq_set(sc->sc_ci, freq);
}
232
/*
 * Evaluate _PSS, the performance supported states, and fill
 * in sc_pstate[]. Entries that fail validation, or that are
 * not in strictly descending order of frequency, are disabled
 * by zeroing ps_freq. Returns AE_OK when at least one usable
 * P-state was found.
 */
static ACPI_STATUS
acpicpu_pstate_pss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	sc->sc_pstate_count = obj->Package.Count;

	if (sc->sc_pstate_count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	/* Guard the allocation below against absurd counts. */
	if (sc->sc_pstate_count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
	    sizeof(struct acpicpu_pstate), KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			aprint_error_dev(sc->sc_dev, "failed to add "
			    "P-state: %s\n", AcpiFormatException(rv));
			ps->ps_freq = 0;
			continue;
		}

		/*
		 * Disable any state whose frequency does not
		 * strictly decrease relative to the earlier ones.
		 */
		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	/* Fail if no valid P-state remained after validation. */
	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
307
308 static ACPI_STATUS
acpicpu_pstate_pss_add(struct acpicpu_pstate * ps,ACPI_OBJECT * obj)309 acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
310 {
311 ACPI_OBJECT *elm;
312 int i;
313
314 if (obj->Type != ACPI_TYPE_PACKAGE)
315 return AE_TYPE;
316
317 if (obj->Package.Count != 6)
318 return AE_BAD_DATA;
319
320 elm = obj->Package.Elements;
321
322 for (i = 0; i < 6; i++) {
323
324 if (elm[i].Type != ACPI_TYPE_INTEGER)
325 return AE_TYPE;
326
327 if (elm[i].Integer.Value > UINT32_MAX)
328 return AE_AML_NUMERIC_OVERFLOW;
329 }
330
331 ps->ps_freq = elm[0].Integer.Value;
332 ps->ps_power = elm[1].Integer.Value;
333 ps->ps_latency = elm[2].Integer.Value;
334 ps->ps_latency_bm = elm[3].Integer.Value;
335 ps->ps_control = elm[4].Integer.Value;
336 ps->ps_status = elm[5].Integer.Value;
337
338 if (ps->ps_freq == 0 || ps->ps_freq > 9999)
339 return AE_BAD_DECIMAL_CONSTANT;
340
341 /*
342 * Sanity check also the latency levels. Some systems may
343 * report a value zero, but we keep one microsecond as the
344 * lower bound; see for instance AMD family 12h,
345 *
346 * Advanced Micro Devices: BIOS and Kernel Developer's
347 * Guide (BKDG) for AMD Family 12h Processors. Section
348 * 2.5.3.1.9.2, Revision 3.02, October, 2011.
349 */
350 if (ps->ps_latency == 0 || ps->ps_latency > 1000)
351 ps->ps_latency = 1;
352
353 return AE_OK;
354 }
355
/*
 * Evaluate XPSS, the extended _PSS, and merge its additional
 * information into the P-states gathered earlier from _PSS.
 * The package must have the same number of entries as _PSS.
 */
static ACPI_STATUS
acpicpu_pstate_xpss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i = 0;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "XPSS", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != sc->sc_pstate_count) {
		rv = AE_LIMIT;
		goto out;
	}

	while (i < sc->sc_pstate_count) {

		ps = &sc->sc_pstate[i];

		/*
		 * NOTE(review): the return value is ignored here, so
		 * an entry rejected by the XPSS validation keeps its
		 * _PSS data -- presumably best-effort; confirm.
		 */
		acpicpu_pstate_xpss_add(ps, &obj->Package.Elements[i]);

		i++;
	}

out:
	/* AE_NOT_FOUND simply means XPSS is absent; stay silent. */
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "XPSS: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
400
/*
 * Parse one XPSS entry: a package of four 32-bit integers
 * followed by four 8-byte buffers. Fields that _PSS already
 * filled in (non-zero) are left untouched.
 */
static ACPI_STATUS
acpicpu_pstate_xpss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 8)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	/* The first four elements are 32-bit integers. */
	for (i = 0; i < 4; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	/* The remaining four are 8-byte (64-bit) buffers. */
	for (; i < 8; i++) {

		if (elm[i].Type != ACPI_TYPE_BUFFER)
			return AE_TYPE;

		if (elm[i].Buffer.Length != 8)
			return AE_LIMIT;
	}

	/*
	 * Only overwrite the elements that were
	 * not available from the conventional _PSS.
	 */
	if (ps->ps_freq == 0)
		ps->ps_freq = elm[0].Integer.Value;

	if (ps->ps_power == 0)
		ps->ps_power = elm[1].Integer.Value;

	if (ps->ps_latency == 0)
		ps->ps_latency = elm[2].Integer.Value;

	if (ps->ps_latency_bm == 0)
		ps->ps_latency_bm = elm[3].Integer.Value;

	if (ps->ps_control == 0)
		ps->ps_control = ACPI_GET64(elm[4].Buffer.Pointer);

	if (ps->ps_status == 0)
		ps->ps_status = ACPI_GET64(elm[5].Buffer.Pointer);

	if (ps->ps_control_mask == 0)
		ps->ps_control_mask = ACPI_GET64(elm[6].Buffer.Pointer);

	if (ps->ps_status_mask == 0)
		ps->ps_status_mask = ACPI_GET64(elm[7].Buffer.Pointer);

	ps->ps_flags |= ACPICPU_FLAG_P_XPSS;

	/* Validate the merged values like the plain _PSS path does. */
	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	if (ps->ps_latency == 0 || ps->ps_latency > 1000)
		ps->ps_latency = 1;

	return AE_OK;
}
471
/*
 * Evaluate _PCT, the performance control package, yielding
 * the control and status registers used to switch P-states.
 * Both registers must reside in the same address space.
 */
static ACPI_STATUS
acpicpu_pstate_pct(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_reg);
	struct acpicpu_reg *reg[2];
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint8_t width;
	uint32_t i;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	/* _PCT holds exactly two buffers: control and status. */
	if (obj->Package.Count != 2) {
		rv = AE_LIMIT;
		goto out;
	}

	for (i = 0; i < 2; i++) {

		elm = &obj->Package.Elements[i];

		if (elm->Type != ACPI_TYPE_BUFFER) {
			rv = AE_TYPE;
			goto out;
		}

		if (size > elm->Buffer.Length) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

		switch (reg[i]->reg_spaceid) {

		case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		case ACPI_ADR_SPACE_SYSTEM_IO:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

			width = reg[i]->reg_bitwidth;

			/* The register window must fit within 32 bits. */
			if (width + reg[i]->reg_bitoffset > 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			/* Only 8-, 16-, and 32-bit widths are accepted. */
			if (width != 8 && width != 16 && width != 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			break;

		case ACPI_ADR_SPACE_FIXED_HARDWARE:

			/*
			 * With XPSS the registers are expected to be
			 * full 64 bits wide with no bit offset.
			 */
			if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

				if (reg[i]->reg_bitwidth != 64) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				if (reg[i]->reg_bitoffset != 0) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				break;
			}

			/* Otherwise fixed hardware requires FFH support. */
			if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}

			break;

		default:
			rv = AE_AML_INVALID_SPACE_ID;
			goto out;
		}
	}

	/* Control and status must live in the same address space. */
	if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	(void)memcpy(&sc->sc_pstate_control, reg[0], size);
	(void)memcpy(&sc->sc_pstate_status, reg[1], size);

	if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

		/*
		 * At the very least, mandate that
		 * XPSS supplies the control address.
		 */
		if (sc->sc_pstate_control.reg_addr == 0) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		/*
		 * If XPSS is present, copy the supplied
		 * MSR addresses to the P-state structures.
		 */
		for (i = 0; i < sc->sc_pstate_count; i++) {

			ps = &sc->sc_pstate[i];

			/* Skip states disabled during validation. */
			if (ps->ps_freq == 0)
				continue;

			ps->ps_status_addr = sc->sc_pstate_status.reg_addr;
			ps->ps_control_addr = sc->sc_pstate_control.reg_addr;
		}
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
612
613 static ACPI_STATUS
acpicpu_pstate_dep(struct acpicpu_softc * sc)614 acpicpu_pstate_dep(struct acpicpu_softc *sc)
615 {
616 ACPI_OBJECT *elm, *obj;
617 ACPI_BUFFER buf;
618 ACPI_STATUS rv;
619 uint32_t val;
620 uint8_t i, n;
621
622 rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSD", &buf);
623
624 if (ACPI_FAILURE(rv))
625 goto out;
626
627 obj = buf.Pointer;
628
629 if (obj->Type != ACPI_TYPE_PACKAGE) {
630 rv = AE_TYPE;
631 goto out;
632 }
633
634 if (obj->Package.Count != 1) {
635 rv = AE_LIMIT;
636 goto out;
637 }
638
639 elm = &obj->Package.Elements[0];
640
641 if (obj->Type != ACPI_TYPE_PACKAGE) {
642 rv = AE_TYPE;
643 goto out;
644 }
645
646 n = elm->Package.Count;
647
648 if (n != 5) {
649 rv = AE_LIMIT;
650 goto out;
651 }
652
653 elm = elm->Package.Elements;
654
655 for (i = 0; i < n; i++) {
656
657 if (elm[i].Type != ACPI_TYPE_INTEGER) {
658 rv = AE_TYPE;
659 goto out;
660 }
661
662 if (elm[i].Integer.Value > UINT32_MAX) {
663 rv = AE_AML_NUMERIC_OVERFLOW;
664 goto out;
665 }
666 }
667
668 val = elm[1].Integer.Value;
669
670 if (val != 0)
671 aprint_debug_dev(sc->sc_dev, "invalid revision in _PSD\n");
672
673 val = elm[3].Integer.Value;
674
675 if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) {
676 rv = AE_AML_BAD_RESOURCE_VALUE;
677 goto out;
678 }
679
680 val = elm[4].Integer.Value;
681
682 if (val > sc->sc_ncpus) {
683 rv = AE_BAD_VALUE;
684 goto out;
685 }
686
687 sc->sc_pstate_dep.dep_domain = elm[2].Integer.Value;
688 sc->sc_pstate_dep.dep_type = elm[3].Integer.Value;
689 sc->sc_pstate_dep.dep_ncpus = elm[4].Integer.Value;
690
691 out:
692 if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
693 aprint_debug_dev(sc->sc_dev, "failed to evaluate "
694 "_PSD: %s\n", AcpiFormatException(rv));
695
696 if (buf.Pointer != NULL)
697 ACPI_FREE(buf.Pointer);
698
699 return rv;
700 }
701
702 static int
acpicpu_pstate_max(struct acpicpu_softc * sc)703 acpicpu_pstate_max(struct acpicpu_softc *sc)
704 {
705 ACPI_INTEGER val;
706 ACPI_STATUS rv;
707
708 /*
709 * Evaluate the currently highest P-state that can be used.
710 * If available, we can use either this state or any lower
711 * power (i.e. higher numbered) state from the _PSS object.
712 * Note that the return value must match the _OST parameter.
713 */
714 rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);
715
716 if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {
717
718 if (sc->sc_pstate[val].ps_freq != 0) {
719 sc->sc_pstate_max = val;
720 return 0;
721 }
722 }
723
724 return 1;
725 }
726
727 static int
acpicpu_pstate_min(struct acpicpu_softc * sc)728 acpicpu_pstate_min(struct acpicpu_softc *sc)
729 {
730 ACPI_INTEGER val;
731 ACPI_STATUS rv;
732
733 /*
734 * The _PDL object defines the minimum when passive cooling
735 * is being performed. If available, we can use the returned
736 * state or any higher power (i.e. lower numbered) state.
737 */
738 rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PDL", &val);
739
740 if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {
741
742 if (sc->sc_pstate[val].ps_freq == 0)
743 return 1;
744
745 if (val >= sc->sc_pstate_max) {
746 sc->sc_pstate_min = val;
747 return 0;
748 }
749 }
750
751 return 1;
752 }
753
/*
 * React to a change in the dynamic limits: reset the limits,
 * re-evaluate _PPC and _PDL, and report the new maximum back
 * to the platform via the optional _OST method.
 *
 * NOTE(review): the static locals cache the _PDL result and
 * the last _OST status across calls -- and thus across all
 * CPUs sharing this file; presumably intentional, confirm.
 */
static void
acpicpu_pstate_change(struct acpicpu_softc *sc)
{
	static ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[2];
	static int val = 0;

	acpicpu_pstate_reset(sc);

	/*
	 * Cache the checks as the optional
	 * _PDL and _OST are rarely present.
	 */
	if (val == 0)
		val = acpicpu_pstate_min(sc);

	arg.Count = 2;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_INTEGER;
	obj[1].Type = ACPI_TYPE_INTEGER;

	obj[0].Integer.Value = ACPICPU_P_NOTIFY;
	obj[1].Integer.Value = acpicpu_pstate_max(sc);

	/* Skip _OST permanently once it has failed. */
	if (ACPI_FAILURE(rv))
		return;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
}
785
/*
 * Reset the dynamic limits so that the whole table is usable:
 * index 0 is the maximum (highest-performance) state and the
 * last index the minimum.
 */
static void
acpicpu_pstate_reset(struct acpicpu_softc *sc)
{

	sc->sc_pstate_max = 0;
	sc->sc_pstate_min = sc->sc_pstate_count - 1;

}
794
795 static void
acpicpu_pstate_bios(void)796 acpicpu_pstate_bios(void)
797 {
798 const uint8_t val = AcpiGbl_FADT.PstateControl;
799 const uint32_t addr = AcpiGbl_FADT.SmiCommand;
800
801 if (addr == 0 || val == 0)
802 return;
803
804 (void)AcpiOsWritePort(addr, val, 8);
805 }
806
807 void
acpicpu_pstate_get(void * aux,void * cpu_freq)808 acpicpu_pstate_get(void *aux, void *cpu_freq)
809 {
810 struct acpicpu_pstate *ps = NULL;
811 struct cpu_info *ci = curcpu();
812 struct acpicpu_softc *sc;
813 uint32_t freq, i, val = 0;
814 int rv;
815
816 sc = acpicpu_sc[ci->ci_acpiid];
817
818 if (__predict_false(sc == NULL)) {
819 rv = ENXIO;
820 goto fail;
821 }
822
823 if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
824 rv = ENODEV;
825 goto fail;
826 }
827
828 mutex_enter(&sc->sc_mtx);
829
830 /*
831 * Use the cached value, if available.
832 */
833 if (sc->sc_pstate_current != 0) {
834 *(uint32_t *)cpu_freq = sc->sc_pstate_current;
835 mutex_exit(&sc->sc_mtx);
836 return;
837 }
838
839 mutex_exit(&sc->sc_mtx);
840
841 switch (sc->sc_pstate_status.reg_spaceid) {
842
843 case ACPI_ADR_SPACE_FIXED_HARDWARE:
844
845 rv = acpicpu_md_pstate_get(sc, &freq);
846
847 if (__predict_false(rv != 0))
848 goto fail;
849
850 break;
851
852 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
853 case ACPI_ADR_SPACE_SYSTEM_IO:
854
855 val = acpicpu_readreg(&sc->sc_pstate_status);
856
857 if (val == 0) {
858 rv = EIO;
859 goto fail;
860 }
861
862 for (i = 0; i < sc->sc_pstate_count; i++) {
863
864 if (sc->sc_pstate[i].ps_freq == 0)
865 continue;
866
867 if (val == sc->sc_pstate[i].ps_status) {
868 ps = &sc->sc_pstate[i];
869 break;
870 }
871 }
872
873 if (ps == NULL) {
874 rv = EIO;
875 goto fail;
876 }
877
878 freq = ps->ps_freq;
879 break;
880
881 default:
882 rv = ENOTTY;
883 goto fail;
884 }
885
886 mutex_enter(&sc->sc_mtx);
887 sc->sc_pstate_current = freq;
888 *(uint32_t *)cpu_freq = freq;
889 mutex_exit(&sc->sc_mtx);
890
891 return;
892
893 fail:
894 aprint_error_dev(sc->sc_dev, "failed "
895 "to get frequency (err %d)\n", rv);
896
897 mutex_enter(&sc->sc_mtx);
898 sc->sc_pstate_current = 0;
899 *(uint32_t *)cpu_freq = 0;
900 mutex_exit(&sc->sc_mtx);
901 }
902
903 void
acpicpu_pstate_set(void * aux,void * cpu_freq)904 acpicpu_pstate_set(void *aux, void *cpu_freq)
905 {
906 struct acpicpu_pstate *ps = NULL;
907 struct cpu_info *ci = curcpu();
908 struct acpicpu_softc *sc;
909 uint32_t freq, i, val;
910 int rv;
911
912 freq = *(uint32_t *)cpu_freq;
913 sc = acpicpu_sc[ci->ci_acpiid];
914
915 if (__predict_false(sc == NULL)) {
916 rv = ENXIO;
917 goto fail;
918 }
919
920 if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
921 rv = ENODEV;
922 goto fail;
923 }
924
925 mutex_enter(&sc->sc_mtx);
926
927 if (sc->sc_pstate_current == freq) {
928 mutex_exit(&sc->sc_mtx);
929 return;
930 }
931
932 /*
933 * Verify that the requested frequency is available.
934 *
935 * The access needs to be protected since the currently
936 * available maximum and minimum may change dynamically.
937 */
938 for (i = sc->sc_pstate_max; i <= sc->sc_pstate_min; i++) {
939
940 if (__predict_false(sc->sc_pstate[i].ps_freq == 0))
941 continue;
942
943 if (sc->sc_pstate[i].ps_freq == freq) {
944 ps = &sc->sc_pstate[i];
945 break;
946 }
947 }
948
949 mutex_exit(&sc->sc_mtx);
950
951 if (__predict_false(ps == NULL)) {
952 rv = EINVAL;
953 goto fail;
954 }
955
956 switch (sc->sc_pstate_control.reg_spaceid) {
957
958 case ACPI_ADR_SPACE_FIXED_HARDWARE:
959
960 rv = acpicpu_md_pstate_set(ps);
961
962 if (__predict_false(rv != 0))
963 goto fail;
964
965 break;
966
967 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
968 case ACPI_ADR_SPACE_SYSTEM_IO:
969
970 acpicpu_writereg(&sc->sc_pstate_control, ps->ps_control);
971
972 /*
973 * Some systems take longer to respond
974 * than the reported worst-case latency.
975 */
976 for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {
977
978 val = acpicpu_readreg(&sc->sc_pstate_status);
979
980 if (val == ps->ps_status)
981 break;
982
983 DELAY(ps->ps_latency);
984 }
985
986 if (i == ACPICPU_P_STATE_RETRY) {
987 rv = EAGAIN;
988 goto fail;
989 }
990
991 break;
992
993 default:
994 rv = ENOTTY;
995 goto fail;
996 }
997
998 mutex_enter(&sc->sc_mtx);
999 ps->ps_evcnt.ev_count++;
1000 sc->sc_pstate_current = freq;
1001 mutex_exit(&sc->sc_mtx);
1002
1003 return;
1004
1005 fail:
1006 if (rv != EINVAL)
1007 aprint_error_dev(sc->sc_dev, "failed to set "
1008 "frequency to %u (err %d)\n", freq, rv);
1009
1010 mutex_enter(&sc->sc_mtx);
1011 sc->sc_pstate_current = 0;
1012 mutex_exit(&sc->sc_mtx);
1013 }
1014