/* $NetBSD: acpi_cpu_pstate.c,v 1.13.2.3 2010/10/09 03:32:04 yamt Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen (at) iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.13.2.3 2010/10/09 03:32:04 yamt Exp $");

#include <sys/param.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/once.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_pstate")

static void		acpicpu_pstate_attach_print(struct acpicpu_softc *);
static void		acpicpu_pstate_attach_evcnt(struct acpicpu_softc *);
static void		acpicpu_pstate_detach_evcnt(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_pss(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_pss_add(struct acpicpu_pstate *,
						ACPI_OBJECT *);
static ACPI_STATUS	acpicpu_pstate_xpss(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_xpss_add(struct acpicpu_pstate *,
						ACPI_OBJECT *);
static ACPI_STATUS	acpicpu_pstate_pct(struct acpicpu_softc *);
static int		acpicpu_pstate_max(struct acpicpu_softc *);
static int		acpicpu_pstate_min(struct acpicpu_softc *);
static void		acpicpu_pstate_change(struct acpicpu_softc *);
static void		acpicpu_pstate_reset(struct acpicpu_softc *);
static void		acpicpu_pstate_bios(void);

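/*
 * The P-state frequency (in MHz) saved across suspend and resume;
 * zero when no state has been saved.
 */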
static uint32_t acpicpu_pstate_saved = 0;

void
acpicpu_pstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	const char *str;
	ACPI_HANDLE tmp;
	ACPI_STATUS rv;

	rv = acpicpu_pstate_pss(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PSS";
		goto fail;
	}

	/*
	 * Check the availability of the extended _PSS (XPSS).
	 * If it is present, it overrides the _PSS data. Note
	 * that XPSS cannot be used on Intel systems where
	 * _PDC or _OSC may be used.
	 */
	if (sc->sc_cap == 0) {
		rv = acpicpu_pstate_xpss(sc);

		if (ACPI_SUCCESS(rv))
			sc->sc_flags |= ACPICPU_FLAG_P_XPSS;
	}

	rv = acpicpu_pstate_pct(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PCT";
		goto fail;
	}

	/*
	 * The ACPI 3.0 and 4.0 specifications mandate three
	 * objects for P-states: _PSS, _PCT, and _PPC. The
	 * earlier 2.0 standard, however, uses less strict
	 * wording, and some systems conforming to ACPI 2.0
	 * do not provide _PPC, the method for the dynamic maximum.
	 */
	rv = AcpiGetHandle(sc->sc_node->ad_handle, "_PPC", &tmp);

	if (ACPI_FAILURE(rv))
		aprint_debug_dev(self, "_PPC missing\n");

	/*
	 * Employ the XPSS structure by filling
	 * it with MD information required for FFH.
	 */
	rv = acpicpu_md_pstate_pss(sc);

	if (rv != 0) {
		rv = AE_SUPPORT;
		goto fail;
	}

	sc->sc_flags |= ACPICPU_FLAG_P;

	acpicpu_pstate_bios();
	acpicpu_pstate_reset(sc);
	acpicpu_pstate_attach_evcnt(sc);
	acpicpu_pstate_attach_print(sc);

	return;

fail:
	switch (rv) {

	case AE_NOT_FOUND:
		return;

	case AE_SUPPORT:
		aprint_verbose_dev(sc->sc_dev, "P-states not supported\n");
		return;

	default:
		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "%s: %s\n", str, AcpiFormatException(rv));
	}
}

static void
acpicpu_pstate_attach_print(struct acpicpu_softc *sc)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps;
	static bool once = false;
	const char *str;
	uint32_t i;

	if (once != false)
		return;

	str = (method != ACPI_ADR_SPACE_SYSTEM_IO) ? "FFH" : "I/O";

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		aprint_debug_dev(sc->sc_dev, "P%u: %3s, "
		    "lat %3u us, pow %5u mW, %4u MHz\n", i, str,
		    ps->ps_latency, ps->ps_power, ps->ps_freq);
	}

	once = true;
}

static void
acpicpu_pstate_attach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	uint32_t i;

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}
}

int
acpicpu_pstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	size_t size;
	int rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
		return 0;

	rv = RUN_ONCE(&once_detach, acpicpu_md_pstate_stop);

	if (rv != 0)
		return rv;

	/*
	 * Detach the event counters before the P-state
	 * array that backs them is released.
	 */
	acpicpu_pstate_detach_evcnt(sc);

	size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);

	if (sc->sc_pstate != NULL)
		kmem_free(sc->sc_pstate, size);

	sc->sc_flags &= ~ACPICPU_FLAG_P;

	return 0;
}

static void
acpicpu_pstate_detach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	uint32_t i;

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}
}

void
acpicpu_pstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps;
	uint32_t i;
	int rv;

	rv = acpicpu_md_pstate_start();

	if (rv != 0)
		goto fail;

	/*
	 * Initialize the state to P0.
	 */
	for (i = 0, rv = ENXIO; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0) {
			sc->sc_cold = false;
			rv = acpicpu_pstate_set(sc, ps->ps_freq);
			break;
		}
	}

	if (rv != 0)
		goto fail;

	return;

fail:
	sc->sc_flags &= ~ACPICPU_FLAG_P;

	if (rv == EEXIST) {
		aprint_error_dev(self, "driver conflicts with existing one\n");
		return;
	}

	aprint_error_dev(self, "failed to start P-states (err %d)\n", rv);
}

bool
acpicpu_pstate_suspend(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps = NULL;
	int32_t i;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_reset(sc);
	mutex_exit(&sc->sc_mtx);

	if (acpicpu_pstate_saved != 0)
		return true;

	/*
	 * Following design notes for Windows, we set the highest
	 * P-state when entering any of the system sleep states.
	 * When resuming, the saved P-state will be restored.
	 *
	 *	Microsoft Corporation: Windows Native Processor
	 *	Performance Control. Version 1.1a, November, 2002.
	 */
	for (i = sc->sc_pstate_count - 1; i >= 0; i--) {

		if (sc->sc_pstate[i].ps_freq != 0) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	if (__predict_false(ps == NULL))
		return true;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_saved = sc->sc_pstate_current;
	mutex_exit(&sc->sc_mtx);

	if (acpicpu_pstate_saved == ps->ps_freq)
		return true;

	(void)acpicpu_pstate_set(sc, ps->ps_freq);

	return true;
}

bool
acpicpu_pstate_resume(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	if (acpicpu_pstate_saved != 0) {
		(void)acpicpu_pstate_set(sc, acpicpu_pstate_saved);
		acpicpu_pstate_saved = 0;
	}

	return true;
}

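/*
 * Notify callback: re-evaluate the dynamic limits (_PPC and,
 * under passive cooling, _PDL) and report a change of the
 * maximum frequency.
 */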
void
acpicpu_pstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;
	uint32_t old, new;

	sc = device_private(self);

	mutex_enter(&sc->sc_mtx);
	old = sc->sc_pstate_max;
	acpicpu_pstate_change(sc);
	new = sc->sc_pstate_max;
	mutex_exit(&sc->sc_mtx);

	if (old != new) {

		aprint_debug_dev(sc->sc_dev, "maximum frequency "
		    "changed from P%u (%u MHz) to P%u (%u MHz)\n",
		    old, sc->sc_pstate[old].ps_freq, new,
		    sc->sc_pstate[new].ps_freq);
#if 0
		/*
		 * If the maximum changed, proactively
		 * raise or lower the target frequency.
		 */
		(void)acpicpu_pstate_set(sc, sc->sc_pstate[new].ps_freq);

#endif
	}
}

ACPI_STATUS
acpicpu_pstate_pss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	sc->sc_pstate_count = obj->Package.Count;

	if (sc->sc_pstate_count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	if (sc->sc_pstate_count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
	    sizeof(struct acpicpu_pstate), KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			ps->ps_freq = 0;
			continue;
		}

		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

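/*
 * Validate and extract a single _PSS package entry. Each entry
 * consists of six integers; a hypothetical entry could look like
 * the following in ASL (values are illustrative only):
 *
 *	Package () {
 *		2000,		// CoreFrequency (MHz)
 *		35000,		// Power (mW)
 *		10,		// Latency (us)
 *		10,		// BusMasterLatency (us)
 *		0x0C,		// Control
 *		0x0C		// Status
 *	}
 */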
static ACPI_STATUS
acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 6)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 6; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	ps->ps_freq = elm[0].Integer.Value;
	ps->ps_power = elm[1].Integer.Value;
	ps->ps_latency = elm[2].Integer.Value;
	ps->ps_latency_bm = elm[3].Integer.Value;
	ps->ps_control = elm[4].Integer.Value;
	ps->ps_status = elm[5].Integer.Value;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	/*
	 * The latency is typically around 10 usec
	 * on Intel CPUs. Use that as the minimum.
	 */
	if (ps->ps_latency < 10)
		ps->ps_latency = 10;

	return AE_OK;
}

static ACPI_STATUS
acpicpu_pstate_xpss(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_pstate);
	struct acpicpu_pstate *ps, *pstate = NULL;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count, pstate_count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "XPSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	pstate_count = count = obj->Package.Count;

	if (count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	if (count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	pstate = kmem_zalloc(count * size, KM_SLEEP);

	if (pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < pstate_count; i++) {

		ps = &pstate[i];
		rv = acpicpu_pstate_xpss_add(ps, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			ps->ps_freq = 0;
			continue;
		}

		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	if (count > 0) {
		if (sc->sc_pstate != NULL)
			kmem_free(sc->sc_pstate, sc->sc_pstate_count * size);
		sc->sc_pstate = pstate;
		sc->sc_pstate_count = pstate_count;
		rv = AE_OK;
	} else {
		kmem_free(pstate, pstate_count * size);
		rv = AE_NOT_EXIST;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

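/*
 * Validate and extract a single XPSS package entry. The layout
 * parsed below consists of eight elements: the same four integers
 * as in _PSS (frequency, power, latency, bus-master latency),
 * followed by four 8-byte buffers holding the 64-bit control
 * value, status value, control mask, and status mask.
 */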
static ACPI_STATUS
acpicpu_pstate_xpss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 8)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 4; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	for (; i < 8; i++) {

		if (elm[i].Type != ACPI_TYPE_BUFFER)
			return AE_TYPE;

		if (elm[i].Buffer.Length != 8)
			return AE_LIMIT;
	}

	ps->ps_freq = elm[0].Integer.Value;
	ps->ps_power = elm[1].Integer.Value;
	ps->ps_latency = elm[2].Integer.Value;
	ps->ps_latency_bm = elm[3].Integer.Value;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	ps->ps_control = ACPI_GET64(elm[4].Buffer.Pointer);
	ps->ps_status = ACPI_GET64(elm[5].Buffer.Pointer);
	ps->ps_control_mask = ACPI_GET64(elm[6].Buffer.Pointer);
	ps->ps_status_mask = ACPI_GET64(elm[7].Buffer.Pointer);

	/*
	 * The latency is often defined to be
	 * zero on AMD systems. Raise that to 1.
	 */
	if (ps->ps_latency == 0)
		ps->ps_latency = 1;

	ps->ps_flags |= ACPICPU_FLAG_P_XPSS;

	return AE_OK;
}

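/*
 * Parse _PCT, the performance control package. It contains two
 * register descriptors, the performance control register followed
 * by the performance status register; both must use the same
 * address space (system I/O or fixed hardware).
 */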
ACPI_STATUS
acpicpu_pstate_pct(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_reg);
	struct acpicpu_reg *reg[2];
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint8_t width;
	uint32_t i;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 2) {
		rv = AE_LIMIT;
		goto out;
	}

	for (i = 0; i < 2; i++) {

		elm = &obj->Package.Elements[i];

		if (elm->Type != ACPI_TYPE_BUFFER) {
			rv = AE_TYPE;
			goto out;
		}

		if (size > elm->Buffer.Length) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

		switch (reg[i]->reg_spaceid) {

		case ACPI_ADR_SPACE_SYSTEM_IO:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

			width = reg[i]->reg_bitwidth;

			if (width + reg[i]->reg_bitoffset > 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			if (width != 8 && width != 16 && width != 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			break;

		case ACPI_ADR_SPACE_FIXED_HARDWARE:

			if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

				if (reg[i]->reg_bitwidth != 64) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				if (reg[i]->reg_bitoffset != 0) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				break;
			}

			if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}

			break;

		default:
			rv = AE_AML_INVALID_SPACE_ID;
			goto out;
		}
	}

	if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	(void)memcpy(&sc->sc_pstate_control, reg[0], size);
	(void)memcpy(&sc->sc_pstate_status, reg[1], size);

	if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
		goto out;

	/*
	 * In XPSS the control address cannot be zero,
	 * but the status address may be. In this case,
	 * comparable to T-states, we can ignore the status
	 * check during the P-state (FFH) transition.
	 */
	if (sc->sc_pstate_control.reg_addr == 0) {
		rv = AE_AML_BAD_RESOURCE_LENGTH;
		goto out;
	}

	/*
	 * If XPSS is present, copy the MSR addresses
	 * to the P-state structures for convenience.
	 */
	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		ps->ps_status_addr = sc->sc_pstate_status.reg_addr;
		ps->ps_control_addr = sc->sc_pstate_control.reg_addr;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static int
acpicpu_pstate_max(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * Evaluate the currently highest P-state that can be used.
	 * If available, we can use either this state or any lower
	 * power (i.e. higher numbered) state from the _PSS object.
	 * Note that the return value must match the _OST parameter.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq != 0) {
			sc->sc_pstate_max = val;
			return 0;
		}
	}

	return 1;
}

static int
acpicpu_pstate_min(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * The _PDL object defines the minimum when passive cooling
	 * is being performed. If available, we can use the returned
	 * state or any higher power (i.e. lower numbered) state.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PDL", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq == 0)
			return 1;

		if (val >= sc->sc_pstate_max) {
			sc->sc_pstate_min = val;
			return 0;
		}
	}

	return 1;
}

static void
acpicpu_pstate_change(struct acpicpu_softc *sc)
{
	static ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[2];

	acpicpu_pstate_reset(sc);

	arg.Count = 2;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_INTEGER;
	obj[1].Type = ACPI_TYPE_INTEGER;

	obj[0].Integer.Value = ACPICPU_P_NOTIFY;
	obj[1].Integer.Value = acpicpu_pstate_max(sc);

	if (sc->sc_passive != false)
		(void)acpicpu_pstate_min(sc);

	if (ACPI_FAILURE(rv))
		return;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
}

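/*
 * Reset the dynamic limits to the full range: P0 as the
 * maximum-performance state and the last table entry as
 * the minimum.
 */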
static void
acpicpu_pstate_reset(struct acpicpu_softc *sc)
{

	sc->sc_pstate_max = 0;
	sc->sc_pstate_min = sc->sc_pstate_count - 1;

}

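/*
 * Hand over P-state control from the platform firmware to the
 * operating system by writing the FADT P-state control value
 * to the SMI command port, provided that both are defined.
 */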
static void
acpicpu_pstate_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.PstateControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

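/*
 * Get the current frequency in MHz. The cached value is used
 * when available; otherwise the frequency is obtained either
 * via the MD (FFH) backend or by reading the status register
 * defined by _PCT and matching it against the known P-states.
 */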
int
acpicpu_pstate_get(struct acpicpu_softc *sc, uint32_t *freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val = 0;
	uint64_t addr;
	uint8_t width;
	int rv;

	if (sc->sc_cold != false) {
		rv = EBUSY;
		goto fail;
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_pstate_current != ACPICPU_P_STATE_UNKNOWN) {
		*freq = sc->sc_pstate_current;
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	mutex_exit(&sc->sc_mtx);

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_get(sc, freq);

		if (rv != 0)
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		(void)AcpiOsReadPort(addr, &val, width);

		if (val == 0) {
			rv = EIO;
			goto fail;
		}

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			if (val == sc->sc_pstate[i].ps_status) {
				ps = &sc->sc_pstate[i];
				break;
			}
		}

		if (__predict_false(ps == NULL)) {
			rv = EIO;
			goto fail;
		}

		*freq = ps->ps_freq;
		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = *freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed "
	    "to get frequency (err %d)\n", rv);

	mutex_enter(&sc->sc_mtx);
	*freq = sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}

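/*
 * Set the given frequency (in MHz). With the I/O method the
 * control value is written to the control register and the
 * status register is then polled, delaying by the reported
 * latency, until the expected status value appears or the
 * retry limit is reached.
 */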
int
acpicpu_pstate_set(struct acpicpu_softc *sc, uint32_t freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val;
	uint64_t addr;
	uint8_t width;
	int rv;

	if (sc->sc_cold != false) {
		rv = EBUSY;
		goto fail;
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_pstate_current == freq) {
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	for (i = sc->sc_pstate_max; i <= sc->sc_pstate_min; i++) {

		if (sc->sc_pstate[i].ps_freq == 0)
			continue;

		if (sc->sc_pstate[i].ps_freq == freq) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	mutex_exit(&sc->sc_mtx);

	if (__predict_false(ps == NULL)) {
		rv = EINVAL;
		goto fail;
	}

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_set(ps);

		if (rv != 0)
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_control.reg_addr;
		width = sc->sc_pstate_control.reg_bitwidth;

		(void)AcpiOsWritePort(addr, ps->ps_control, width);

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		/*
		 * Some systems take longer to respond
		 * than the reported worst-case latency.
		 */
		for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

			(void)AcpiOsReadPort(addr, &val, width);

			if (val == ps->ps_status)
				break;

			DELAY(ps->ps_latency);
		}

		if (i == ACPICPU_P_STATE_RETRY) {
			rv = EAGAIN;
			goto fail;
		}

		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	ps->ps_evcnt.ev_count++;
	sc->sc_pstate_current = freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed to set "
	    "frequency to %u (err %d)\n", freq, rv);

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}