/* $NetBSD: acpi_cpu_pstate.c,v 1.37 2011/01/30 08:55:52 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.37 2011/01/30 08:55:52 jruoho Exp $");

#include <sys/param.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/once.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_pstate")

static void		 acpicpu_pstate_attach_print(struct acpicpu_softc *);
static void		 acpicpu_pstate_attach_evcnt(struct acpicpu_softc *);
static void		 acpicpu_pstate_detach_evcnt(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_pstate_pss(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_pstate_pss_add(struct acpicpu_pstate *,
						ACPI_OBJECT *);
static ACPI_STATUS	 acpicpu_pstate_xpss(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_pstate_xpss_add(struct acpicpu_pstate *,
						 ACPI_OBJECT *);
static ACPI_STATUS	 acpicpu_pstate_pct(struct acpicpu_softc *);
static int		 acpicpu_pstate_max(struct acpicpu_softc *);
static int		 acpicpu_pstate_min(struct acpicpu_softc *);
static void		 acpicpu_pstate_change(struct acpicpu_softc *);
static void		 acpicpu_pstate_reset(struct acpicpu_softc *);
static void		 acpicpu_pstate_bios(void);

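/*
 * Frequency (MHz) saved by acpicpu_pstate_suspend() and
 * restored by acpicpu_pstate_resume(); zero when no state
 * has been saved.
 */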
static uint32_t acpicpu_pstate_saved = 0;

void
acpicpu_pstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	const char *str;
	ACPI_HANDLE tmp;
	ACPI_STATUS rv;

	rv = acpicpu_pstate_pss(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PSS";
		goto fail;
	}

	/*
	 * Append additional information from the extended _PSS,
	 * if available. Note that XPSS can not be used on Intel
	 * systems that use either _PDC or _OSC. From the XPSS
	 * method specification:
	 *
	 *	"The platform must not require the use of the
	 *	 optional _PDC or _OSC methods to coordinate
	 *	 between the operating system and firmware for
	 *	 the purposes of enabling specific processor
	 *	 power management features or implementations."
	 */
	if (sc->sc_cap == 0) {

		rv = acpicpu_pstate_xpss(sc);

		if (ACPI_SUCCESS(rv))
			sc->sc_flags |= ACPICPU_FLAG_P_XPSS;
	}

	rv = acpicpu_pstate_pct(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PCT";
		goto fail;
	}

	/*
	 * The ACPI 3.0 and 4.0 specifications mandate three
	 * objects for P-states: _PSS, _PCT, and _PPC. A less
	 * strict wording is however used in the earlier 2.0
	 * standard, and some systems conforming to ACPI 2.0
	 * do not have _PPC, the method for dynamic maximum.
	 */
	rv = AcpiGetHandle(sc->sc_node->ad_handle, "_PPC", &tmp);

	if (ACPI_FAILURE(rv))
		aprint_debug_dev(self, "_PPC missing\n");

	/*
	 * Employ the XPSS structure by filling
	 * it with MD information required for FFH.
	 */
	rv = acpicpu_md_pstate_pss(sc);

	if (rv != 0) {
		rv = AE_SUPPORT;
		goto fail;
	}

	sc->sc_flags |= ACPICPU_FLAG_P;

	acpicpu_pstate_bios();
	acpicpu_pstate_reset(sc);
	acpicpu_pstate_attach_evcnt(sc);
	acpicpu_pstate_attach_print(sc);

	return;

fail:
	switch (rv) {

	case AE_NOT_FOUND:
		return;

	case AE_SUPPORT:
		aprint_verbose_dev(self, "P-states not supported\n");
		return;

	default:
		aprint_error_dev(self, "failed to evaluate "
		    "%s: %s\n", str, AcpiFormatException(rv));
	}
}

static void
acpicpu_pstate_attach_print(struct acpicpu_softc *sc)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps;
	static bool once = false;
	const char *str;
	uint32_t i;

	if (once != false)
		return;

	str = (method != ACPI_ADR_SPACE_SYSTEM_IO) ? "FFH" : "I/O";

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
		    "lat %3u us, pow %5u mW, %4u MHz\n", i, str,
		    ps->ps_latency, ps->ps_power, ps->ps_freq);
	}

	once = true;
}

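/*
 * Attach one event counter per usable P-state. The counter
 * is bumped by acpicpu_pstate_set() each time the state in
 * question is entered.
 */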
static void
acpicpu_pstate_attach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	uint32_t i;

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}
}

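/*
 * Detach the P-state support: stop the MD backend (only once,
 * across all processors), release the P-state array, and
 * detach the event counters.
 */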
int
acpicpu_pstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	size_t size;
	int rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
		return 0;

	rv = RUN_ONCE(&once_detach, acpicpu_md_pstate_stop);

	if (rv != 0)
		return rv;

	size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);

	if (sc->sc_pstate != NULL)
		kmem_free(sc->sc_pstate, size);

	sc->sc_flags &= ~ACPICPU_FLAG_P;
	acpicpu_pstate_detach_evcnt(sc);

	return 0;
}

static void
acpicpu_pstate_detach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	uint32_t i;

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}
}

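/*
 * Start the P-state machinery: initialize the MD backend and
 * switch to the first usable (highest-frequency) entry in the
 * table, clearing sc_cold so that transitions are permitted.
 */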
void
acpicpu_pstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps;
	uint32_t i;
	int rv;

	rv = acpicpu_md_pstate_start();

	if (rv != 0)
		goto fail;

	/*
	 * Initialize the state to P0.
	 */
	for (i = 0, rv = ENXIO; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0) {
			sc->sc_cold = false;
			rv = acpicpu_pstate_set(sc, ps->ps_freq);
			break;
		}
	}

	if (rv != 0)
		goto fail;

	return;

fail:
	sc->sc_flags &= ~ACPICPU_FLAG_P;

	if (rv == EEXIST) {
		aprint_error_dev(self, "driver conflicts with existing one\n");
		return;
	}

	aprint_error_dev(self, "failed to start P-states (err %d)\n", rv);
}

bool
acpicpu_pstate_suspend(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps = NULL;
	int32_t i;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_reset(sc);
	mutex_exit(&sc->sc_mtx);

	if (acpicpu_pstate_saved != 0)
		return true;

	/*
	 * Following design notes for Windows, we set the highest
	 * P-state when entering any of the system sleep states.
	 * When resuming, the saved P-state will be restored.
	 *
	 *	Microsoft Corporation: Windows Native Processor
	 *	Performance Control. Version 1.1a, November, 2002.
	 */
	for (i = sc->sc_pstate_count - 1; i >= 0; i--) {

		if (sc->sc_pstate[i].ps_freq != 0) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	if (__predict_false(ps == NULL))
		return true;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_saved = sc->sc_pstate_current;
	mutex_exit(&sc->sc_mtx);

	if (acpicpu_pstate_saved == ps->ps_freq)
		return true;

	(void)acpicpu_pstate_set(sc, ps->ps_freq);

	return true;
}

bool
acpicpu_pstate_resume(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	if (acpicpu_pstate_saved != 0) {
		(void)acpicpu_pstate_set(sc, acpicpu_pstate_saved);
		acpicpu_pstate_saved = 0;
	}

	return true;
}

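/*
 * Re-evaluate the P-state limits after a performance change
 * notification and, if the maximum available state changed,
 * switch to the frequency of the new maximum.
 */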
void
acpicpu_pstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;
	uint32_t old, new;

	sc = device_private(self);

	mutex_enter(&sc->sc_mtx);

	old = sc->sc_pstate_max;
	acpicpu_pstate_change(sc);
	new = sc->sc_pstate_max;

	if (old == new) {
		mutex_exit(&sc->sc_mtx);
		return;
	}

	mutex_exit(&sc->sc_mtx);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "maximum frequency "
	    "changed from P%u (%u MHz) to P%u (%u MHz)\n",
	    old, sc->sc_pstate[old].ps_freq, new,
	    sc->sc_pstate[new].ps_freq));

	(void)acpicpu_pstate_set(sc, sc->sc_pstate[new].ps_freq);
}

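/*
 * Evaluate _PSS and build the P-state table. Entries that fail
 * to parse or that are not in descending order of frequency are
 * disabled by zeroing their frequency; at least one valid entry
 * is required.
 */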
ACPI_STATUS
acpicpu_pstate_pss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	sc->sc_pstate_count = obj->Package.Count;

	if (sc->sc_pstate_count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	if (sc->sc_pstate_count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
	    sizeof(struct acpicpu_pstate), KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			aprint_error_dev(sc->sc_dev, "failed to add "
			    "P-state: %s\n", AcpiFormatException(rv));
			ps->ps_freq = 0;
			continue;
		}

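		/*
		 * The _PSS table is expected to list the states
		 * in descending order of frequency. Disable any
		 * duplicate or out-of-order entry.
		 */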
		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

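/*
 * Parse a single _PSS entry: a package of six integers holding,
 * in order, the core frequency (MHz), power (mW), transition
 * latency (us), bus-master latency (us), and the control and
 * status values.
 */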
static ACPI_STATUS
acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 6)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 6; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	ps->ps_freq = elm[0].Integer.Value;
	ps->ps_power = elm[1].Integer.Value;
	ps->ps_latency = elm[2].Integer.Value;
	ps->ps_latency_bm = elm[3].Integer.Value;
	ps->ps_control = elm[4].Integer.Value;
	ps->ps_status = elm[5].Integer.Value;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	/*
	 * The latency is typically around 10 usec
	 * on Intel CPUs. Use that as the minimum.
	 */
	if (ps->ps_latency < 10)
		ps->ps_latency = 10;

	return AE_OK;
}

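/*
 * Evaluate the extended _PSS ("XPSS") package, if present. Each
 * entry is expected to mirror the first four _PSS integers and to
 * append four 8-byte buffers with the 64-bit control and status
 * values and their masks; see also the specification quoted in
 * acpicpu_pstate_attach(). Only fields that _PSS left unset are
 * filled in.
 */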
static ACPI_STATUS
acpicpu_pstate_xpss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i = 0;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "XPSS", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != sc->sc_pstate_count) {
		rv = AE_LIMIT;
		goto out;
	}

	while (i < sc->sc_pstate_count) {

		ps = &sc->sc_pstate[i];
		acpicpu_pstate_xpss_add(ps, &obj->Package.Elements[i]);

		i++;
	}

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "XPSS: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static ACPI_STATUS
acpicpu_pstate_xpss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 8)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 4; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	for (; i < 8; i++) {

		if (elm[i].Type != ACPI_TYPE_BUFFER)
			return AE_TYPE;

		if (elm[i].Buffer.Length != 8)
			return AE_LIMIT;
	}

	/*
	 * Only overwrite the elements that were
	 * not available from the conventional _PSS.
	 */
	if (ps->ps_freq == 0)
		ps->ps_freq = elm[0].Integer.Value;

	if (ps->ps_power == 0)
		ps->ps_power = elm[1].Integer.Value;

	if (ps->ps_latency == 0)
		ps->ps_latency = elm[2].Integer.Value;

	if (ps->ps_latency_bm == 0)
		ps->ps_latency_bm = elm[3].Integer.Value;

	if (ps->ps_control == 0)
		ps->ps_control = ACPI_GET64(elm[4].Buffer.Pointer);

	if (ps->ps_status == 0)
		ps->ps_status = ACPI_GET64(elm[5].Buffer.Pointer);

	if (ps->ps_control_mask == 0)
		ps->ps_control_mask = ACPI_GET64(elm[6].Buffer.Pointer);

	if (ps->ps_status_mask == 0)
		ps->ps_status_mask = ACPI_GET64(elm[7].Buffer.Pointer);

	/*
	 * The latency is often defined to be
	 * zero on AMD systems. Raise that to 1.
	 */
	if (ps->ps_latency == 0)
		ps->ps_latency = 1;

	ps->ps_flags |= ACPICPU_FLAG_P_XPSS;

	if (ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	return AE_OK;
}

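/*
 * Evaluate _PCT, which returns a package of two buffers describing
 * the performance control and status registers. Each buffer is
 * interpreted here as an acpicpu_reg, assumed to follow the layout
 * of the ACPI generic register descriptor. Both registers must use
 * the same address space; SystemIO registers are checked for a sane
 * address and width, whereas FixedHardware requires either FFH
 * support or the 64-bit XPSS register layout.
 */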
ACPI_STATUS
acpicpu_pstate_pct(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_reg);
	struct acpicpu_reg *reg[2];
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint8_t width;
	uint32_t i;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 2) {
		rv = AE_LIMIT;
		goto out;
	}

	for (i = 0; i < 2; i++) {

		elm = &obj->Package.Elements[i];

		if (elm->Type != ACPI_TYPE_BUFFER) {
			rv = AE_TYPE;
			goto out;
		}

		if (size > elm->Buffer.Length) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

		switch (reg[i]->reg_spaceid) {

		case ACPI_ADR_SPACE_SYSTEM_IO:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

			width = reg[i]->reg_bitwidth;

			if (width + reg[i]->reg_bitoffset > 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			if (width != 8 && width != 16 && width != 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			break;

		case ACPI_ADR_SPACE_FIXED_HARDWARE:

			if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

				if (reg[i]->reg_bitwidth != 64) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				if (reg[i]->reg_bitoffset != 0) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				break;
			}

			if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}

			break;

		default:
			rv = AE_AML_INVALID_SPACE_ID;
			goto out;
		}
	}

	if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	(void)memcpy(&sc->sc_pstate_control, reg[0], size);
	(void)memcpy(&sc->sc_pstate_status, reg[1], size);

	if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
		goto out;

	/*
	 * In XPSS the control address can not be zero,
	 * but the status address may be. In this case,
	 * comparable to T-states, we can ignore the status
	 * check during the P-state (FFH) transition.
	 */
	if (sc->sc_pstate_control.reg_addr == 0) {
		rv = AE_AML_BAD_RESOURCE_LENGTH;
		goto out;
	}

	/*
	 * If XPSS is present, copy the MSR addresses
	 * to the P-state structures for convenience.
	 */
	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		ps->ps_status_addr = sc->sc_pstate_status.reg_addr;
		ps->ps_control_addr = sc->sc_pstate_control.reg_addr;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static int
acpicpu_pstate_max(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * Evaluate the currently highest P-state that can be used.
	 * If available, we can use either this state or any lower
	 * power (i.e. higher numbered) state from the _PSS object.
	 * Note that the return value must match the _OST parameter.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq != 0) {
			sc->sc_pstate_max = val;
			return 0;
		}
	}

	return 1;
}

static int
acpicpu_pstate_min(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * The _PDL object defines the minimum when passive cooling
	 * is being performed. If available, we can use the returned
	 * state or any higher power (i.e. lower numbered) state.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PDL", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq == 0)
			return 1;

		if (val >= sc->sc_pstate_max) {
			sc->sc_pstate_min = val;
			return 0;
		}
	}

	return 1;
}

static void
acpicpu_pstate_change(struct acpicpu_softc *sc)
{
	static ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[2];
	static int val = 0;

	acpicpu_pstate_reset(sc);

	/*
	 * Cache the checks as the optional
	 * _PDL and _OST are rarely present.
	 */
	if (val == 0)
		val = acpicpu_pstate_min(sc);

	arg.Count = 2;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_INTEGER;
	obj[1].Type = ACPI_TYPE_INTEGER;

	obj[0].Integer.Value = ACPICPU_P_NOTIFY;
	obj[1].Integer.Value = acpicpu_pstate_max(sc);

	if (ACPI_FAILURE(rv))
		return;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
}

static void
acpicpu_pstate_reset(struct acpicpu_softc *sc)
{

	sc->sc_pstate_max = 0;
	sc->sc_pstate_min = sc->sc_pstate_count - 1;

}

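/*
 * Hand P-state control over to the operating system by writing
 * the value of the FADT P-state control field to the SMI command
 * port, provided the firmware defines both.
 */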
static void
acpicpu_pstate_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.PstateControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

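/*
 * Return the current frequency in MHz, using the cached value
 * when one is available. Otherwise the value is obtained either
 * from the MD backend (FixedHardware) or by matching the contents
 * of the status port against the _PSS status values (SystemIO).
 */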
int
acpicpu_pstate_get(struct acpicpu_softc *sc, uint32_t *freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val = 0;
	uint64_t addr;
	uint8_t width;
	int rv;

	if (__predict_false(sc->sc_cold != false)) {
		rv = EBUSY;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	/*
	 * Use the cached value, if available.
	 */
	if (sc->sc_pstate_current != ACPICPU_P_STATE_UNKNOWN) {
		*freq = sc->sc_pstate_current;
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	mutex_exit(&sc->sc_mtx);

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_get(sc, freq);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		(void)AcpiOsReadPort(addr, &val, width);

		if (val == 0) {
			rv = EIO;
			goto fail;
		}

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			if (val == sc->sc_pstate[i].ps_status) {
				ps = &sc->sc_pstate[i];
				break;
			}
		}

		if (ps == NULL) {
			rv = EIO;
			goto fail;
		}

		*freq = ps->ps_freq;
		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = *freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed "
	    "to get frequency (err %d)\n", rv);

	mutex_enter(&sc->sc_mtx);
	*freq = sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}

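/*
 * Switch to the P-state that matches the requested frequency,
 * provided it lies within the currently permitted range. For
 * SystemIO the control value is written to the control port and
 * the status port is then polled, using the reported latency as
 * the delay, until the expected status value appears.
 */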
int
acpicpu_pstate_set(struct acpicpu_softc *sc, uint32_t freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val;
	uint64_t addr;
	uint8_t width;
	int rv;

	if (__predict_false(sc->sc_cold != false)) {
		rv = EBUSY;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_pstate_current == freq) {
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	/*
	 * Verify that the requested frequency is available.
	 *
	 * The access needs to be protected since the currently
	 * available maximum and minimum may change dynamically.
	 */
	for (i = sc->sc_pstate_max; i <= sc->sc_pstate_min; i++) {

		if (__predict_false(sc->sc_pstate[i].ps_freq == 0))
			continue;

		if (sc->sc_pstate[i].ps_freq == freq) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	mutex_exit(&sc->sc_mtx);

	if (__predict_false(ps == NULL)) {
		rv = EINVAL;
		goto fail;
	}

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_set(ps);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_control.reg_addr;
		width = sc->sc_pstate_control.reg_bitwidth;

		(void)AcpiOsWritePort(addr, ps->ps_control, width);

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		/*
		 * Some systems take longer to respond
		 * than the reported worst-case latency.
		 */
		for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

			(void)AcpiOsReadPort(addr, &val, width);

			if (val == ps->ps_status)
				break;

			DELAY(ps->ps_latency);
		}

		if (i == ACPICPU_P_STATE_RETRY) {
			rv = EAGAIN;
			goto fail;
		}

		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	ps->ps_evcnt.ev_count++;
	sc->sc_pstate_current = freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed to set "
	    "frequency to %u (err %d)\n", freq, rv);

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}