/* $NetBSD: acpi_cpu_pstate.c,v 1.26.2.4 2010/11/06 08:08:27 uebayasi Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen (at) iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.26.2.4 2010/11/06 08:08:27 uebayasi Exp $");

#include <sys/param.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/once.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_pstate")

static void		acpicpu_pstate_attach_print(struct acpicpu_softc *);
static void		acpicpu_pstate_attach_evcnt(struct acpicpu_softc *);
static void		acpicpu_pstate_detach_evcnt(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_pss(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_pss_add(struct acpicpu_pstate *,
					       ACPI_OBJECT *);
static ACPI_STATUS	acpicpu_pstate_xpss(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_xpss_add(struct acpicpu_pstate *,
						ACPI_OBJECT *);
static ACPI_STATUS	acpicpu_pstate_pct(struct acpicpu_softc *);
static int		acpicpu_pstate_max(struct acpicpu_softc *);
static int		acpicpu_pstate_min(struct acpicpu_softc *);
static void		acpicpu_pstate_change(struct acpicpu_softc *);
static void		acpicpu_pstate_reset(struct acpicpu_softc *);
static void		acpicpu_pstate_bios(void);

static uint32_t		acpicpu_pstate_saved = 0;

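/*
 * Evaluate the ACPI P-state objects (_PSS, the extended XPSS, and
 * _PCT), check for the optional _PPC, and initialize the MD part of
 * the frequency interface. On failure ACPICPU_FLAG_P is left unset.
 */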
void
acpicpu_pstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	const char *str;
	ACPI_HANDLE tmp;
	ACPI_STATUS rv;

	rv = acpicpu_pstate_pss(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PSS";
		goto fail;
	}

	/*
	 * Append additional information from the
	 * extended _PSS, if available. Note that
	 * XPSS cannot be used on Intel systems
	 * that use either _PDC or _OSC.
	 */
	if (sc->sc_cap == 0) {

		rv = acpicpu_pstate_xpss(sc);

		if (ACPI_SUCCESS(rv))
			sc->sc_flags |= ACPICPU_FLAG_P_XPSS;
	}

	rv = acpicpu_pstate_pct(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PCT";
		goto fail;
	}

	/*
	 * The ACPI 3.0 and 4.0 specifications mandate three
	 * objects for P-states: _PSS, _PCT, and _PPC. A less
	 * strict wording is, however, used in the earlier 2.0
	 * standard, and some systems conforming to ACPI 2.0
	 * do not have _PPC, the method for the dynamic maximum.
	 */
	rv = AcpiGetHandle(sc->sc_node->ad_handle, "_PPC", &tmp);

	if (ACPI_FAILURE(rv))
		aprint_debug_dev(self, "_PPC missing\n");

	/*
	 * Employ the XPSS structure by filling
	 * it with MD information required for FFH.
	 */
	rv = acpicpu_md_pstate_pss(sc);

	if (rv != 0) {
		rv = AE_SUPPORT;
		goto fail;
	}

	sc->sc_flags |= ACPICPU_FLAG_P;

	acpicpu_pstate_bios();
	acpicpu_pstate_reset(sc);
	acpicpu_pstate_attach_evcnt(sc);
	acpicpu_pstate_attach_print(sc);

	return;

fail:
	switch (rv) {

	case AE_NOT_FOUND:
		return;

	case AE_SUPPORT:
		aprint_verbose_dev(sc->sc_dev, "P-states not supported\n");
		return;

	default:
		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "%s: %s\n", str, AcpiFormatException(rv));
	}
}

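/*
 * Print the attached P-states once (debug output only).
 */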
static void
acpicpu_pstate_attach_print(struct acpicpu_softc *sc)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps;
	static bool once = false;
	const char *str;
	uint32_t i;

	if (once != false)
		return;

	str = (method != ACPI_ADR_SPACE_SYSTEM_IO) ? "FFH" : "I/O";

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		aprint_debug_dev(sc->sc_dev, "P%d: %3s, "
		    "lat %3u us, pow %5u mW, %4u MHz\n", i, str,
		    ps->ps_latency, ps->ps_power, ps->ps_freq);
	}

	once = true;
}

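/*
 * Attach an event counter for each usable P-state.
 */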
static void
acpicpu_pstate_attach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	uint32_t i;

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}
}

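/*
 * Detach the P-state support: stop the MD frequency interface once,
 * detach the event counters, and release the P-state array.
 */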
int
acpicpu_pstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	size_t size;
	int rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
		return 0;

	rv = RUN_ONCE(&once_detach, acpicpu_md_pstate_stop);

	if (rv != 0)
		return rv;

	acpicpu_pstate_detach_evcnt(sc);

	size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);

	if (sc->sc_pstate != NULL)
		kmem_free(sc->sc_pstate, size);

	sc->sc_flags &= ~ACPICPU_FLAG_P;

	return 0;
}

static void
acpicpu_pstate_detach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	uint32_t i;

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}
}

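/*
 * Start the MD frequency interface and enter the initial P-state.
 */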
void
acpicpu_pstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps;
	uint32_t i;
	int rv;

	rv = acpicpu_md_pstate_start();

	if (rv != 0)
		goto fail;

	/*
	 * Initialize the state to P0.
	 */
	for (i = 0, rv = ENXIO; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0) {
			sc->sc_cold = false;
			rv = acpicpu_pstate_set(sc, ps->ps_freq);
			break;
		}
	}

	if (rv != 0)
		goto fail;

	return;

fail:
	sc->sc_flags &= ~ACPICPU_FLAG_P;

	if (rv == EEXIST) {
		aprint_error_dev(self, "driver conflicts with existing one\n");
		return;
	}

	aprint_error_dev(self, "failed to start P-states (err %d)\n", rv);
}

bool
acpicpu_pstate_suspend(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps = NULL;
	int32_t i;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_reset(sc);
	mutex_exit(&sc->sc_mtx);

	if (acpicpu_pstate_saved != 0)
		return true;

	/*
	 * Following the design notes for Windows, we enter the
	 * lowest-frequency (highest numbered) P-state when going
	 * into any of the system sleep states. The saved P-state
	 * is restored when resuming.
	 *
	 * Microsoft Corporation: Windows Native Processor
	 * Performance Control. Version 1.1a, November, 2002.
	 */
	for (i = sc->sc_pstate_count - 1; i >= 0; i--) {

		if (sc->sc_pstate[i].ps_freq != 0) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	if (__predict_false(ps == NULL))
		return true;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_saved = sc->sc_pstate_current;
	mutex_exit(&sc->sc_mtx);

	if (acpicpu_pstate_saved == ps->ps_freq)
		return true;

	(void)acpicpu_pstate_set(sc, ps->ps_freq);

	return true;
}

bool
acpicpu_pstate_resume(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	if (acpicpu_pstate_saved != 0) {
		(void)acpicpu_pstate_set(sc, acpicpu_pstate_saved);
		acpicpu_pstate_saved = 0;
	}

	return true;
}

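/*
 * Callback for a P-state change notification. Re-evaluate the
 * limits and log a change in the dynamic maximum (_PPC).
 */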
void
acpicpu_pstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;
	uint32_t old, new;

	sc = device_private(self);

	mutex_enter(&sc->sc_mtx);
	old = sc->sc_pstate_max;
	acpicpu_pstate_change(sc);
	new = sc->sc_pstate_max;
	mutex_exit(&sc->sc_mtx);

	if (old != new) {

		aprint_debug_dev(sc->sc_dev, "maximum frequency "
		    "changed from P%u (%u MHz) to P%u (%u MHz)\n",
		    old, sc->sc_pstate[old].ps_freq, new,
		    sc->sc_pstate[new].ps_freq);
#if 0
		/*
		 * If the maximum changed, proactively
		 * raise or lower the target frequency.
		 */
		(void)acpicpu_pstate_set(sc, sc->sc_pstate[new].ps_freq);

#endif
	}
}

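/*
 * Parse the _PSS package and build the array of P-states. Entries
 * that are invalid or not in descending frequency order are
 * disabled by clearing their frequency.
 */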
static ACPI_STATUS
acpicpu_pstate_pss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	sc->sc_pstate_count = obj->Package.Count;

	if (sc->sc_pstate_count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	if (sc->sc_pstate_count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
	    sizeof(struct acpicpu_pstate), KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			ps->ps_freq = 0;
			continue;
		}

		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

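/*
 * Fill in a single P-state from a six-element _PSS entry:
 * core frequency, power, latencies, control and status values.
 */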
static ACPI_STATUS
acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 6)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 6; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	ps->ps_freq = elm[0].Integer.Value;
	ps->ps_power = elm[1].Integer.Value;
	ps->ps_latency = elm[2].Integer.Value;
	ps->ps_latency_bm = elm[3].Integer.Value;
	ps->ps_control = elm[4].Integer.Value;
	ps->ps_status = elm[5].Integer.Value;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	/*
	 * The latency is typically around 10 usec
	 * on Intel CPUs. Use that as the minimum.
	 */
	if (ps->ps_latency < 10)
		ps->ps_latency = 10;

	return AE_OK;
}

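/*
 * Parse the extended _PSS (XPSS), if available. The package is
 * expected to contain one entry per state found in _PSS.
 */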
static ACPI_STATUS
acpicpu_pstate_xpss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i = 0;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "XPSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != sc->sc_pstate_count) {
		rv = AE_LIMIT;
		goto out;
	}

	while (i < sc->sc_pstate_count) {

		ps = &sc->sc_pstate[i];
		acpicpu_pstate_xpss_add(ps, &obj->Package.Elements[i]);

		i++;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

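/*
 * Fill in a single P-state from an eight-element XPSS entry,
 * overwriting only the fields that _PSS did not provide.
 */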
static ACPI_STATUS
acpicpu_pstate_xpss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 8)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 4; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	for (; i < 8; i++) {

		if (elm[i].Type != ACPI_TYPE_BUFFER)
			return AE_TYPE;

		if (elm[i].Buffer.Length != 8)
			return AE_LIMIT;
	}

	/*
	 * Only overwrite the elements that were
	 * not available from the conventional _PSS.
	 */
	if (ps->ps_freq == 0)
		ps->ps_freq = elm[0].Integer.Value;

	if (ps->ps_power == 0)
		ps->ps_power = elm[1].Integer.Value;

	if (ps->ps_latency == 0)
		ps->ps_latency = elm[2].Integer.Value;

	if (ps->ps_latency_bm == 0)
		ps->ps_latency_bm = elm[3].Integer.Value;

	if (ps->ps_control == 0)
		ps->ps_control = ACPI_GET64(elm[4].Buffer.Pointer);

	if (ps->ps_status == 0)
		ps->ps_status = ACPI_GET64(elm[5].Buffer.Pointer);

	if (ps->ps_control_mask == 0)
		ps->ps_control_mask = ACPI_GET64(elm[6].Buffer.Pointer);

	if (ps->ps_status_mask == 0)
		ps->ps_status_mask = ACPI_GET64(elm[7].Buffer.Pointer);

	/*
	 * The latency is often defined to be
	 * zero on AMD systems. Raise that to 1.
	 */
	if (ps->ps_latency == 0)
		ps->ps_latency = 1;

	ps->ps_flags |= ACPICPU_FLAG_P_XPSS;

	if (ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	return AE_OK;
}

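/*
 * Parse the _PCT package that describes the performance control
 * and status registers, and verify the register descriptors.
 */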
static ACPI_STATUS
acpicpu_pstate_pct(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_reg);
	struct acpicpu_reg *reg[2];
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint8_t width;
	uint32_t i;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 2) {
		rv = AE_LIMIT;
		goto out;
	}

	for (i = 0; i < 2; i++) {

		elm = &obj->Package.Elements[i];

		if (elm->Type != ACPI_TYPE_BUFFER) {
			rv = AE_TYPE;
			goto out;
		}

		if (size > elm->Buffer.Length) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

		switch (reg[i]->reg_spaceid) {

		case ACPI_ADR_SPACE_SYSTEM_IO:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

			width = reg[i]->reg_bitwidth;

			if (width + reg[i]->reg_bitoffset > 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			if (width != 8 && width != 16 && width != 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			break;

		case ACPI_ADR_SPACE_FIXED_HARDWARE:

			if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

				if (reg[i]->reg_bitwidth != 64) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				if (reg[i]->reg_bitoffset != 0) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				break;
			}

			if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}

			break;

		default:
			rv = AE_AML_INVALID_SPACE_ID;
			goto out;
		}
	}

	if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	(void)memcpy(&sc->sc_pstate_control, reg[0], size);
	(void)memcpy(&sc->sc_pstate_status, reg[1], size);

	if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
		goto out;

	/*
	 * In XPSS the control address cannot be zero,
	 * but the status address may be. In this case,
	 * comparable to T-states, we can ignore the status
	 * check during the P-state (FFH) transition.
	 */
	if (sc->sc_pstate_control.reg_addr == 0) {
		rv = AE_AML_BAD_RESOURCE_LENGTH;
		goto out;
	}

	/*
	 * If XPSS is present, copy the MSR addresses
	 * to the P-state structures for convenience.
	 */
	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		ps->ps_status_addr = sc->sc_pstate_status.reg_addr;
		ps->ps_control_addr = sc->sc_pstate_control.reg_addr;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static int
acpicpu_pstate_max(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * Evaluate the currently highest P-state that can be used.
	 * If available, we can use either this state or any lower
	 * power (i.e. higher numbered) state from the _PSS object.
	 * Note that the return value must match the _OST parameter.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq != 0) {
			sc->sc_pstate_max = val;
			return 0;
		}
	}

	return 1;
}

static int
acpicpu_pstate_min(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * The _PDL object defines the minimum when passive cooling
	 * is being performed. If available, we can use the returned
	 * state or any higher power (i.e. lower numbered) state.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PDL", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq == 0)
			return 1;

		if (val >= sc->sc_pstate_max) {
			sc->sc_pstate_min = val;
			return 0;
		}
	}

	return 1;
}

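/*
 * Re-evaluate the P-state limits (_PPC and, in passive cooling
 * mode, _PDL) and report the result back to the firmware via _OST.
 */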
static void
acpicpu_pstate_change(struct acpicpu_softc *sc)
{
	static ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[2];

	acpicpu_pstate_reset(sc);

	arg.Count = 2;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_INTEGER;
	obj[1].Type = ACPI_TYPE_INTEGER;

	obj[0].Integer.Value = ACPICPU_P_NOTIFY;
	obj[1].Integer.Value = acpicpu_pstate_max(sc);

	if (sc->sc_passive != false)
		(void)acpicpu_pstate_min(sc);

	if (ACPI_FAILURE(rv))
		return;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
}

static void
acpicpu_pstate_reset(struct acpicpu_softc *sc)
{

	sc->sc_pstate_max = 0;
	sc->sc_pstate_min = sc->sc_pstate_count - 1;
}

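/*
 * Request native control of the P-states from the firmware by
 * writing the FADT "P-state control" value to the SMI command
 * port, provided that both are defined.
 */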
static void
acpicpu_pstate_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.PstateControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

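/*
 * Get the current frequency: use the cached value if it is known,
 * otherwise ask the MD backend (FFH) or read the status register
 * (I/O), and cache the result.
 */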
int
acpicpu_pstate_get(struct acpicpu_softc *sc, uint32_t *freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val = 0;
	uint64_t addr;
	uint8_t width;
	int rv;

	if (sc->sc_cold != false) {
		rv = EBUSY;
		goto fail;
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_pstate_current != ACPICPU_P_STATE_UNKNOWN) {
		*freq = sc->sc_pstate_current;
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	mutex_exit(&sc->sc_mtx);

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_get(sc, freq);

		if (rv != 0)
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		(void)AcpiOsReadPort(addr, &val, width);

		if (val == 0) {
			rv = EIO;
			goto fail;
		}

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			if (val == sc->sc_pstate[i].ps_status) {
				ps = &sc->sc_pstate[i];
				break;
			}
		}

		if (__predict_false(ps == NULL)) {
			rv = EIO;
			goto fail;
		}

		*freq = ps->ps_freq;
		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = *freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed "
	    "to get frequency (err %d)\n", rv);

	mutex_enter(&sc->sc_mtx);
	*freq = sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}

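/*
 * Set the given frequency within the current limits, using either
 * the MD backend (FFH) or the control register (I/O). For I/O, the
 * status register is polled until the transition has completed.
 */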
int
acpicpu_pstate_set(struct acpicpu_softc *sc, uint32_t freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val;
	uint64_t addr;
	uint8_t width;
	int rv;

	if (sc->sc_cold != false) {
		rv = EBUSY;
		goto fail;
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_pstate_current == freq) {
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	for (i = sc->sc_pstate_max; i <= sc->sc_pstate_min; i++) {

		if (sc->sc_pstate[i].ps_freq == 0)
			continue;

		if (sc->sc_pstate[i].ps_freq == freq) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	mutex_exit(&sc->sc_mtx);

	if (__predict_false(ps == NULL)) {
		rv = EINVAL;
		goto fail;
	}

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_set(ps);

		if (rv != 0)
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_control.reg_addr;
		width = sc->sc_pstate_control.reg_bitwidth;

		(void)AcpiOsWritePort(addr, ps->ps_control, width);

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		/*
		 * Some systems take longer to respond
		 * than the reported worst-case latency.
		 */
		for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

			(void)AcpiOsReadPort(addr, &val, width);

			if (val == ps->ps_status)
				break;

			DELAY(ps->ps_latency);
		}

		if (i == ACPICPU_P_STATE_RETRY) {
			rv = EAGAIN;
			goto fail;
		}

		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	ps->ps_evcnt.ev_count++;
	sc->sc_pstate_current = freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed to set "
	    "frequency to %u (err %d)\n", freq, rv);

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}