/* $NetBSD: acpi_cpu_pstate.c,v 1.31 2010/08/20 04:16:00 jruoho Exp $ */
2
3 /*-
4 * Copyright (c) 2010 Jukka Ruohonen <jruohonen (at) iki.fi>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.31 2010/08/20 04:16:00 jruoho Exp $");
31
32 #include <sys/param.h>
33 #include <sys/evcnt.h>
34 #include <sys/kmem.h>
35 #include <sys/once.h>
36
37 #include <dev/acpi/acpireg.h>
38 #include <dev/acpi/acpivar.h>
39 #include <dev/acpi/acpi_cpu.h>
40
41 #define _COMPONENT ACPI_BUS_COMPONENT
42 ACPI_MODULE_NAME ("acpi_cpu_pstate")
43
44 static void acpicpu_pstate_attach_print(struct acpicpu_softc *);
45 static void acpicpu_pstate_attach_evcnt(struct acpicpu_softc *);
46 static void acpicpu_pstate_detach_evcnt(struct acpicpu_softc *);
47 static ACPI_STATUS acpicpu_pstate_pss(struct acpicpu_softc *);
48 static ACPI_STATUS acpicpu_pstate_pss_add(struct acpicpu_pstate *,
49 ACPI_OBJECT *);
50 static ACPI_STATUS acpicpu_pstate_xpss(struct acpicpu_softc *);
51 static ACPI_STATUS acpicpu_pstate_xpss_add(struct acpicpu_pstate *,
52 ACPI_OBJECT *);
53 static ACPI_STATUS acpicpu_pstate_pct(struct acpicpu_softc *);
54 static int acpicpu_pstate_max(struct acpicpu_softc *);
55 static int acpicpu_pstate_min(struct acpicpu_softc *);
56 static void acpicpu_pstate_change(struct acpicpu_softc *);
57 static void acpicpu_pstate_reset(struct acpicpu_softc *);
58 static void acpicpu_pstate_bios(void);
59
60 static uint32_t acpicpu_pstate_saved = 0;
61
62 void
63 acpicpu_pstate_attach(device_t self)
64 {
65 struct acpicpu_softc *sc = device_private(self);
66 const char *str;
67 ACPI_HANDLE tmp;
68 ACPI_STATUS rv;
69
70 rv = acpicpu_pstate_pss(sc);
71
72 if (ACPI_FAILURE(rv)) {
73 str = "_PSS";
74 goto fail;
75 }
76
77 /*
78 * Check the availability of extended _PSS.
79 * If present, this will override the data.
80 * Note that XPSS can not be used on Intel
81 * systems where _PDC or _OSC may be used.
82 */
83 if (sc->sc_cap == 0) {
84
85 rv = acpicpu_pstate_xpss(sc);
86
87 if (ACPI_SUCCESS(rv))
88 sc->sc_flags |= ACPICPU_FLAG_P_XPSS;
89
90 if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
91 str = "XPSS";
92 goto fail;
93 }
94 }
95
96 rv = acpicpu_pstate_pct(sc);
97
98 if (ACPI_FAILURE(rv)) {
99 str = "_PCT";
100 goto fail;
101 }
102
103 /*
104 * The ACPI 3.0 and 4.0 specifications mandate three
105 * objects for P-states: _PSS, _PCT, and _PPC. A less
106 * strict wording is however used in the earlier 2.0
107 * standard, and some systems conforming to ACPI 2.0
108 * do not have _PPC, the method for dynamic maximum.
109 */
110 rv = AcpiGetHandle(sc->sc_node->ad_handle, "_PPC", &tmp);
111
112 if (ACPI_FAILURE(rv))
113 aprint_debug_dev(self, "_PPC missing\n");
114
115 /*
116 * Employ the XPSS structure by filling
117 * it with MD information required for FFH.
118 */
119 rv = acpicpu_md_pstate_pss(sc);
120
121 if (rv != 0) {
122 rv = AE_SUPPORT;
123 goto fail;
124 }
125
126 sc->sc_flags |= ACPICPU_FLAG_P;
127
128 acpicpu_pstate_bios();
129 acpicpu_pstate_reset(sc);
130 acpicpu_pstate_attach_evcnt(sc);
131 acpicpu_pstate_attach_print(sc);
132
133 return;
134
135 fail:
136 switch (rv) {
137
138 case AE_NOT_FOUND:
139 return;
140
141 case AE_SUPPORT:
142 aprint_verbose_dev(sc->sc_dev, "P-states not supported\n");
143 return;
144
145 default:
146 aprint_error_dev(sc->sc_dev, "failed to evaluate "
147 "%s: %s\n", str, AcpiFormatException(rv));
148 }
149 }
150
151 static void
152 acpicpu_pstate_attach_print(struct acpicpu_softc *sc)
153 {
154 const uint8_t method = sc->sc_pstate_control.reg_spaceid;
155 struct acpicpu_pstate *ps;
156 static bool once = false;
157 const char *str;
158 uint32_t i;
159
160 if (once != false)
161 return;
162
163 str = (method != ACPI_ADR_SPACE_SYSTEM_IO) ? "FFH" : "I/O";
164
165 for (i = 0; i < sc->sc_pstate_count; i++) {
166
167 ps = &sc->sc_pstate[i];
168
169 if (ps->ps_freq == 0)
170 continue;
171
172 aprint_debug_dev(sc->sc_dev, "P%d: %3s, "
173 "lat %3u us, pow %5u mW, %4u MHz\n", i, str,
174 ps->ps_latency, ps->ps_power, ps->ps_freq);
175 }
176
177 once = true;
178 }
179
180 static void
181 acpicpu_pstate_attach_evcnt(struct acpicpu_softc *sc)
182 {
183 struct acpicpu_pstate *ps;
184 uint32_t i;
185
186 for (i = 0; i < sc->sc_pstate_count; i++) {
187
188 ps = &sc->sc_pstate[i];
189
190 if (ps->ps_freq == 0)
191 continue;
192
193 (void)snprintf(ps->ps_name, sizeof(ps->ps_name),
194 "P%u (%u MHz)", i, ps->ps_freq);
195
196 evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
197 NULL, device_xname(sc->sc_dev), ps->ps_name);
198 }
199 }
200
201 int
202 acpicpu_pstate_detach(device_t self)
203 {
204 struct acpicpu_softc *sc = device_private(self);
205 static ONCE_DECL(once_detach);
206 size_t size;
207 int rv;
208
209 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
210 return 0;
211
212 rv = RUN_ONCE(&once_detach, acpicpu_md_pstate_stop);
213
214 if (rv != 0)
215 return rv;
216
217 size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);
218
219 if (sc->sc_pstate != NULL)
220 kmem_free(sc->sc_pstate, size);
221
222 sc->sc_flags &= ~ACPICPU_FLAG_P;
223 acpicpu_pstate_detach_evcnt(sc);
224
225 return 0;
226 }
227
228 static void
229 acpicpu_pstate_detach_evcnt(struct acpicpu_softc *sc)
230 {
231 struct acpicpu_pstate *ps;
232 uint32_t i;
233
234 for (i = 0; i < sc->sc_pstate_count; i++) {
235
236 ps = &sc->sc_pstate[i];
237
238 if (ps->ps_freq != 0)
239 evcnt_detach(&ps->ps_evcnt);
240 }
241 }
242
243 void
244 acpicpu_pstate_start(device_t self)
245 {
246 struct acpicpu_softc *sc = device_private(self);
247 struct acpicpu_pstate *ps;
248 uint32_t i;
249 int rv;
250
251 rv = acpicpu_md_pstate_start();
252
253 if (rv != 0)
254 goto fail;
255
256 /*
257 * Initialize the state to P0.
258 */
259 for (i = 0, rv = ENXIO; i < sc->sc_pstate_count; i++) {
260
261 ps = &sc->sc_pstate[i];
262
263 if (ps->ps_freq != 0) {
264 sc->sc_cold = false;
265 rv = acpicpu_pstate_set(sc, ps->ps_freq);
266 break;
267 }
268 }
269
270 if (rv != 0)
271 goto fail;
272
273 return;
274
275 fail:
276 sc->sc_flags &= ~ACPICPU_FLAG_P;
277 aprint_error_dev(self, "failed to start P-states (err %d)\n", rv);
278 }
279
/*
 * Prepare for system sleep: reset the dynamic P-state limits
 * and, on the first CPU to get here, save the current state
 * and raise the CPU to the lowest-power (highest-numbered)
 * valid P-state. The saved state is restored on resume.
 */
bool
acpicpu_pstate_suspend(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps = NULL;
	int32_t i;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_reset(sc);
	mutex_exit(&sc->sc_mtx);

	/*
	 * acpicpu_pstate_saved is a file-scope global: only the
	 * first CPU performs the save; the rest return early.
	 */
	if (acpicpu_pstate_saved != 0)
		return true;

	/*
	 * Following design notes for Windows, we set the highest
	 * P-state when entering any of the system sleep states.
	 * When resuming, the saved P-state will be restored.
	 *
	 * Microsoft Corporation: Windows Native Processor
	 * Performance Control. Version 1.1a, November, 2002.
	 */
	for (i = sc->sc_pstate_count - 1; i >= 0; i--) {

		if (sc->sc_pstate[i].ps_freq != 0) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	/* No valid state at all; nothing to do. */
	if (__predict_false(ps == NULL))
		return true;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_saved = sc->sc_pstate_current;
	mutex_exit(&sc->sc_mtx);

	/* Already at the target state; skip the transition. */
	if (acpicpu_pstate_saved == ps->ps_freq)
		return true;

	(void)acpicpu_pstate_set(sc, ps->ps_freq);

	return true;
}
324
325 bool
326 acpicpu_pstate_resume(device_t self)
327 {
328 struct acpicpu_softc *sc = device_private(self);
329
330 if (acpicpu_pstate_saved != 0) {
331 (void)acpicpu_pstate_set(sc, acpicpu_pstate_saved);
332 acpicpu_pstate_saved = 0;
333 }
334
335 return true;
336 }
337
338 void
339 acpicpu_pstate_callback(void *aux)
340 {
341 struct acpicpu_softc *sc;
342 device_t self = aux;
343 uint32_t old, new;
344
345 sc = device_private(self);
346
347 mutex_enter(&sc->sc_mtx);
348 old = sc->sc_pstate_max;
349 acpicpu_pstate_change(sc);
350 new = sc->sc_pstate_max;
351 mutex_exit(&sc->sc_mtx);
352
353 if (old != new) {
354
355 aprint_debug_dev(sc->sc_dev, "maximum frequency "
356 "changed from P%u (%u MHz) to P%u (%u MHz)\n",
357 old, sc->sc_pstate[old].ps_freq, new,
358 sc->sc_pstate[sc->sc_pstate_max].ps_freq);
359 #if 0
360 /*
361 * If the maximum changed, proactively
362 * raise or lower the target frequency.
363 */
364 (void)acpicpu_pstate_set(sc, sc->sc_pstate[new].ps_freq);
365
366 #endif
367 }
368 }
369
/*
 * Evaluate the _PSS package and build the P-state table.
 * Returns AE_OK when at least one valid state was parsed;
 * invalid entries are kept in the table with ps_freq == 0.
 */
ACPI_STATUS
acpicpu_pstate_pss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	sc->sc_pstate_count = obj->Package.Count;

	if (sc->sc_pstate_count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	/* Sanity-limit the table size against rogue firmware. */
	if (sc->sc_pstate_count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
	    sizeof(struct acpicpu_pstate), KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);

		/* Invalidate the entry, but keep its table slot. */
		if (ACPI_FAILURE(rv)) {
			ps->ps_freq = 0;
			continue;
		}

		/*
		 * Enforce strictly decreasing frequencies: a state
		 * no faster than any earlier one is invalidated,
		 * keeping the table monotonic from P0 downwards.
		 */
		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
442
443 static ACPI_STATUS
444 acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
445 {
446 ACPI_OBJECT *elm;
447 int i;
448
449 if (obj->Type != ACPI_TYPE_PACKAGE)
450 return AE_TYPE;
451
452 if (obj->Package.Count != 6)
453 return AE_BAD_DATA;
454
455 elm = obj->Package.Elements;
456
457 for (i = 0; i < 6; i++) {
458
459 if (elm[i].Type != ACPI_TYPE_INTEGER)
460 return AE_TYPE;
461
462 if (elm[i].Integer.Value > UINT32_MAX)
463 return AE_AML_NUMERIC_OVERFLOW;
464 }
465
466 ps->ps_freq = elm[0].Integer.Value;
467 ps->ps_power = elm[1].Integer.Value;
468 ps->ps_latency = elm[2].Integer.Value;
469 ps->ps_latency_bm = elm[3].Integer.Value;
470 ps->ps_control = elm[4].Integer.Value;
471 ps->ps_status = elm[5].Integer.Value;
472
473 if (ps->ps_freq == 0 || ps->ps_freq > 9999)
474 return AE_BAD_DECIMAL_CONSTANT;
475
476 /*
477 * The latency is typically around 10 usec
478 * on Intel CPUs. Use that as the minimum.
479 */
480 if (ps->ps_latency < 10)
481 ps->ps_latency = 10;
482
483 return AE_OK;
484 }
485
/*
 * Evaluate the Microsoft XPSS (extended _PSS) package and
 * rebuild the P-state table from it, replacing any table
 * built from _PSS. The validation mirrors acpicpu_pstate_pss().
 */
static ACPI_STATUS
acpicpu_pstate_xpss(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_pstate);
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "XPSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	count = obj->Package.Count;

	if (count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	if (count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	/* Discard the table previously built from _PSS, if any. */
	if (sc->sc_pstate != NULL)
		kmem_free(sc->sc_pstate, sc->sc_pstate_count * size);

	sc->sc_pstate = kmem_zalloc(count * size, KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	sc->sc_pstate_count = count;

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_xpss_add(ps, &obj->Package.Elements[i]);

		/* Invalidate the entry, but keep its table slot. */
		if (ACPI_FAILURE(rv)) {
			ps->ps_freq = 0;
			continue;
		}

		/* Enforce strictly decreasing frequencies. */
		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
563
564 static ACPI_STATUS
565 acpicpu_pstate_xpss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
566 {
567 static const size_t size = sizeof(uint64_t);
568 ACPI_OBJECT *elm;
569 int i;
570
571 if (obj->Type != ACPI_TYPE_PACKAGE)
572 return AE_TYPE;
573
574 if (obj->Package.Count != 8)
575 return AE_BAD_DATA;
576
577 elm = obj->Package.Elements;
578
579 for (i = 0; i < 4; i++) {
580
581 if (elm[i].Type != ACPI_TYPE_INTEGER)
582 return AE_TYPE;
583
584 if (elm[i].Integer.Value > UINT32_MAX)
585 return AE_AML_NUMERIC_OVERFLOW;
586 }
587
588 for (; i < 8; i++) {
589
590 if (elm[i].Type != ACPI_TYPE_BUFFER)
591 return AE_TYPE;
592
593 if (elm[i].Buffer.Length > size)
594 return AE_LIMIT;
595 }
596
597 ps->ps_freq = elm[0].Integer.Value;
598 ps->ps_power = elm[1].Integer.Value;
599 ps->ps_latency = elm[2].Integer.Value;
600 ps->ps_latency_bm = elm[3].Integer.Value;
601
602 if (ps->ps_freq == 0 || ps->ps_freq > 9999)
603 return AE_BAD_DECIMAL_CONSTANT;
604
605 (void)memcpy(&ps->ps_control, elm[4].Buffer.Pointer, size);
606 (void)memcpy(&ps->ps_status, elm[5].Buffer.Pointer, size);
607
608 (void)memcpy(&ps->ps_control_mask, elm[6].Buffer.Pointer, size);
609 (void)memcpy(&ps->ps_status_mask, elm[7].Buffer.Pointer, size);
610
611 /*
612 * The latency is often defined to be
613 * zero on AMD systems. Raise that to 1.
614 */
615 if (ps->ps_latency == 0)
616 ps->ps_latency = 1;
617
618 ps->ps_flags |= ACPICPU_FLAG_P_XPSS;
619
620 return AE_OK;
621 }
622
/*
 * Evaluate the _PCT package: two register buffers describing
 * the performance control and status registers. Validates the
 * address space, address, and bit width/offset, then caches the
 * registers in the softc. With XPSS, the register addresses are
 * also copied into each P-state structure.
 */
ACPI_STATUS
acpicpu_pstate_pct(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_reg);
	struct acpicpu_reg *reg[2];
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint8_t width;
	uint32_t i;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	/* Exactly two elements: control register, status register. */
	if (obj->Package.Count != 2) {
		rv = AE_LIMIT;
		goto out;
	}

	for (i = 0; i < 2; i++) {

		elm = &obj->Package.Elements[i];

		if (elm->Type != ACPI_TYPE_BUFFER) {
			rv = AE_TYPE;
			goto out;
		}

		if (size > elm->Buffer.Length) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

		switch (reg[i]->reg_spaceid) {

		case ACPI_ADR_SPACE_SYSTEM_IO:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

			width = reg[i]->reg_bitwidth;

			/* The whole field must fit a 32-bit port access. */
			if (width + reg[i]->reg_bitoffset > 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			if (width != 8 && width != 16 && width != 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			break;

		case ACPI_ADR_SPACE_FIXED_HARDWARE:

			/*
			 * With XPSS, the register is a full 64-bit
			 * MSR; require the matching width and offset.
			 */
			if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

				if (reg[i]->reg_bitwidth != 64) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				if (reg[i]->reg_bitoffset != 0) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				break;
			}

			/* Plain FFH requires MD support. */
			if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}

			break;

		default:
			rv = AE_AML_INVALID_SPACE_ID;
			goto out;
		}
	}

	/* Control and status must use the same address space. */
	if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	(void)memcpy(&sc->sc_pstate_control, reg[0], size);
	(void)memcpy(&sc->sc_pstate_status, reg[1], size);

	if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
		goto out;

	/*
	 * In XPSS the control address can not be zero,
	 * but the status address may be. In this case,
	 * comparable to T-states, we can ignore the status
	 * check during the P-state (FFH) transition.
	 */
	if (sc->sc_pstate_control.reg_addr == 0) {
		rv = AE_AML_BAD_RESOURCE_LENGTH;
		goto out;
	}

	/*
	 * If XPSS is present, copy the MSR addresses
	 * to the P-state structures for convenience.
	 */
	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		ps->ps_status_addr = sc->sc_pstate_status.reg_addr;
		ps->ps_control_addr = sc->sc_pstate_control.reg_addr;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
764
765 static int
766 acpicpu_pstate_max(struct acpicpu_softc *sc)
767 {
768 ACPI_INTEGER val;
769 ACPI_STATUS rv;
770
771 /*
772 * Evaluate the currently highest P-state that can be used.
773 * If available, we can use either this state or any lower
774 * power (i.e. higher numbered) state from the _PSS object.
775 * Note that the return value must match the _OST parameter.
776 */
777 rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);
778
779 if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {
780
781 if (sc->sc_pstate[val].ps_freq != 0) {
782 sc->sc_pstate_max = val;
783 return 0;
784 }
785 }
786
787 return 1;
788 }
789
790 static int
791 acpicpu_pstate_min(struct acpicpu_softc *sc)
792 {
793 ACPI_INTEGER val;
794 ACPI_STATUS rv;
795
796 /*
797 * The _PDL object defines the minimum when passive cooling
798 * is being performed. If available, we can use the returned
799 * state or any higher power (i.e. lower numbered) state.
800 */
801 rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PDL", &val);
802
803 if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {
804
805 if (sc->sc_pstate[val].ps_freq == 0)
806 return 1;
807
808 if (val >= sc->sc_pstate_max) {
809 sc->sc_pstate_min = val;
810 return 0;
811 }
812 }
813
814 return 1;
815 }
816
/*
 * Handle a change of the dynamic limits: reset and re-evaluate
 * _PPC (and _PDL when passive cooling is in use), then report
 * the status of the performance-change event back to the
 * firmware via _OST.
 */
static void
acpicpu_pstate_change(struct acpicpu_softc *sc)
{
	/*
	 * The status is deliberately static: once a _OST
	 * evaluation has failed, it is never attempted
	 * again (e.g. on systems without a _OST method).
	 */
	static ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[2];

	acpicpu_pstate_reset(sc);

	arg.Count = 2;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_INTEGER;
	obj[1].Type = ACPI_TYPE_INTEGER;

	/*
	 * The second argument is the status code of the
	 * event: acpicpu_pstate_max() returns 0 (success)
	 * or 1 (failure), matching the _OST convention.
	 */
	obj[0].Integer.Value = ACPICPU_P_NOTIFY;
	obj[1].Integer.Value = acpicpu_pstate_max(sc);

	if (sc->sc_passive != false)
		(void)acpicpu_pstate_min(sc);

	if (ACPI_FAILURE(rv))
		return;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
}
843
844 static void
845 acpicpu_pstate_reset(struct acpicpu_softc *sc)
846 {
847
848 sc->sc_pstate_max = 0;
849 sc->sc_pstate_min = sc->sc_pstate_count - 1;
850
851 }
852
853 static void
854 acpicpu_pstate_bios(void)
855 {
856 const uint8_t val = AcpiGbl_FADT.PstateControl;
857 const uint32_t addr = AcpiGbl_FADT.SmiCommand;
858
859 if (addr == 0 || val == 0)
860 return;
861
862 (void)AcpiOsWritePort(addr, val, 8);
863 }
864
/*
 * Return the current frequency (MHz) in *freq. The cached
 * value is used when known; otherwise the state is read via
 * the MD backend (FFH) or the status I/O port. On failure
 * the cache is invalidated and an errno value is returned.
 */
int
acpicpu_pstate_get(struct acpicpu_softc *sc, uint32_t *freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val = 0;
	uint64_t addr;
	uint8_t width;
	int rv;

	/* No transitions during early boot. */
	if (sc->sc_cold != false) {
		rv = EBUSY;
		goto fail;
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	/* Fast path: the cached value is valid. */
	if (sc->sc_pstate_current != ACPICPU_P_STATE_UNKNOWN) {
		*freq = sc->sc_pstate_current;
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	mutex_exit(&sc->sc_mtx);

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_get(sc, freq);

		if (rv != 0)
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		(void)AcpiOsReadPort(addr, &val, width);

		if (val == 0) {
			rv = EIO;
			goto fail;
		}

		/* Map the status value back to a P-state. */
		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			if (val == sc->sc_pstate[i].ps_status) {
				ps = &sc->sc_pstate[i];
				break;
			}
		}

		if (__predict_false(ps == NULL)) {
			rv = EIO;
			goto fail;
		}

		*freq = ps->ps_freq;
		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = *freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed "
	    "to get frequency (err %d)\n", rv);

	/* Invalidate the cache so the next call re-reads. */
	mutex_enter(&sc->sc_mtx);
	*freq = sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}
958
/*
 * Transition to the P-state with the given frequency (MHz).
 * The frequency must name a valid state between the dynamic
 * maximum and minimum. Depending on _PCT, the transition is
 * done via the MD backend (FFH) or the control I/O port; in
 * the latter case the status port is polled until the state
 * sticks. Returns 0 or an errno value.
 */
int
acpicpu_pstate_set(struct acpicpu_softc *sc, uint32_t freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val;
	uint64_t addr;
	uint8_t width;
	int rv;

	/* No transitions during early boot. */
	if (sc->sc_cold != false) {
		rv = EBUSY;
		goto fail;
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	/* Already at the requested frequency. */
	if (sc->sc_pstate_current == freq) {
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	/*
	 * Search only within the dynamic limits set
	 * by _PPC (maximum) and _PDL (minimum).
	 */
	for (i = sc->sc_pstate_max; i <= sc->sc_pstate_min; i++) {

		if (sc->sc_pstate[i].ps_freq == 0)
			continue;

		if (sc->sc_pstate[i].ps_freq == freq) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	mutex_exit(&sc->sc_mtx);

	if (__predict_false(ps == NULL)) {
		rv = EINVAL;
		goto fail;
	}

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_set(ps);

		if (rv != 0)
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_control.reg_addr;
		width = sc->sc_pstate_control.reg_bitwidth;

		(void)AcpiOsWritePort(addr, ps->ps_control, width);

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		/*
		 * Some systems take longer to respond
		 * than the reported worst-case latency.
		 */
		for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

			(void)AcpiOsReadPort(addr, &val, width);

			if (val == ps->ps_status)
				break;

			DELAY(ps->ps_latency);
		}

		if (i == ACPICPU_P_STATE_RETRY) {
			rv = EAGAIN;
			goto fail;
		}

		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	ps->ps_evcnt.ev_count++;
	sc->sc_pstate_current = freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed to set "
	    "frequency to %u (err %d)\n", freq, rv);

	/* Invalidate the cache: the actual state is unknown. */
	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}
1068