/* $NetBSD: acpi_cpu_pstate.c,v 1.29 2010/08/17 10:57:30 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.29 2010/08/17 10:57:30 jruoho Exp $");

#include <sys/param.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/once.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#define _COMPONENT ACPI_BUS_COMPONENT
ACPI_MODULE_NAME ("acpi_cpu_pstate")

static void acpicpu_pstate_attach_print(struct acpicpu_softc *);
static void acpicpu_pstate_attach_evcnt(struct acpicpu_softc *);
static void acpicpu_pstate_detach_evcnt(struct acpicpu_softc *);
static ACPI_STATUS acpicpu_pstate_pss(struct acpicpu_softc *);
static ACPI_STATUS acpicpu_pstate_pss_add(struct acpicpu_pstate *,
    ACPI_OBJECT *);
static ACPI_STATUS acpicpu_pstate_xpss(struct acpicpu_softc *);
static ACPI_STATUS acpicpu_pstate_xpss_add(struct acpicpu_pstate *,
    ACPI_OBJECT *);
static ACPI_STATUS acpicpu_pstate_pct(struct acpicpu_softc *);
static int acpicpu_pstate_max(struct acpicpu_softc *);
static int acpicpu_pstate_min(struct acpicpu_softc *);
static void acpicpu_pstate_change(struct acpicpu_softc *);
static void acpicpu_pstate_reset(struct acpicpu_softc *);
static void acpicpu_pstate_bios(void);

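/*
 * The frequency that was active before the system was suspended.
 * A non-zero value is restored by acpicpu_pstate_resume(); see
 * acpicpu_pstate_suspend() below.
 */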
static uint32_t acpicpu_pstate_saved = 0;

void
acpicpu_pstate_attach(device_t self)
{
        struct acpicpu_softc *sc = device_private(self);
        const char *str;
        ACPI_HANDLE tmp;
        ACPI_STATUS rv;

        rv = acpicpu_pstate_pss(sc);

        if (ACPI_FAILURE(rv)) {
                str = "_PSS";
                goto fail;
        }

        /*
         * Check the availability of extended _PSS.
         * If present, this will override the data.
         * Note that XPSS can not be used on Intel
         * systems where _PDC or _OSC may be used.
         */
        if (sc->sc_cap == 0) {

                rv = acpicpu_pstate_xpss(sc);

                if (ACPI_SUCCESS(rv))
                        sc->sc_flags |= ACPICPU_FLAG_P_XPSS;

                if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
                        str = "XPSS";
                        goto fail;
                }
        }

        rv = acpicpu_pstate_pct(sc);

        if (ACPI_FAILURE(rv)) {
                str = "_PCT";
                goto fail;
        }

        /*
         * The ACPI 3.0 and 4.0 specifications mandate three
         * objects for P-states: _PSS, _PCT, and _PPC. A less
         * strict wording is however used in the earlier 2.0
         * standard, and some systems conforming to ACPI 2.0
         * do not have _PPC, the method for dynamic maximum.
         */
        rv = AcpiGetHandle(sc->sc_node->ad_handle, "_PPC", &tmp);

        if (ACPI_FAILURE(rv))
                aprint_debug_dev(self, "_PPC missing\n");

        sc->sc_flags |= ACPICPU_FLAG_P;

        acpicpu_pstate_bios();
        acpicpu_pstate_reset(sc);
        acpicpu_pstate_attach_evcnt(sc);
        acpicpu_pstate_attach_print(sc);

        return;

fail:
        switch (rv) {

        case AE_NOT_FOUND:
                return;

        case AE_SUPPORT:
                aprint_verbose_dev(sc->sc_dev, "P-states not supported\n");
                return;

        default:
                aprint_error_dev(sc->sc_dev, "failed to evaluate "
                    "%s: %s\n", str, AcpiFormatException(rv));
        }
}

static void
acpicpu_pstate_attach_print(struct acpicpu_softc *sc)
{
        const uint8_t method = sc->sc_pstate_control.reg_spaceid;
        struct acpicpu_pstate *ps;
        static bool once = false;
        const char *str;
        uint32_t i;

        if (once != false)
                return;

        str = (method != ACPI_ADR_SPACE_SYSTEM_IO) ? "FFH" : "I/O";

        for (i = 0; i < sc->sc_pstate_count; i++) {

                ps = &sc->sc_pstate[i];

                if (ps->ps_freq == 0)
                        continue;

                aprint_debug_dev(sc->sc_dev, "P%d: %3s, "
                    "lat %3u us, pow %5u mW, %4u MHz\n", i, str,
                    ps->ps_latency, ps->ps_power, ps->ps_freq);
        }

        once = true;
}

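/*
 * Attach an evcnt(9) counter for each usable P-state. Each
 * counter is incremented on a successful transition to the
 * corresponding state in acpicpu_pstate_set().
 */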
static void
acpicpu_pstate_attach_evcnt(struct acpicpu_softc *sc)
{
        struct acpicpu_pstate *ps;
        uint32_t i;

        for (i = 0; i < sc->sc_pstate_count; i++) {

                ps = &sc->sc_pstate[i];

                if (ps->ps_freq == 0)
                        continue;

                (void)snprintf(ps->ps_name, sizeof(ps->ps_name),
                    "P%u (%u MHz)", i, ps->ps_freq);

                evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
                    NULL, device_xname(sc->sc_dev), ps->ps_name);
        }
}

int
acpicpu_pstate_detach(device_t self)
{
        struct acpicpu_softc *sc = device_private(self);
        static ONCE_DECL(once_detach);
        size_t size;
        int rv;

        if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
                return 0;

        rv = RUN_ONCE(&once_detach, acpicpu_md_pstate_stop);

        if (rv != 0)
                return rv;

        /*
         * Note that the event counters must be detached
         * before the array that contains them is freed.
         */
        sc->sc_flags &= ~ACPICPU_FLAG_P;
        acpicpu_pstate_detach_evcnt(sc);

        size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);

        if (sc->sc_pstate != NULL)
                kmem_free(sc->sc_pstate, size);

        return 0;
}

static void
acpicpu_pstate_detach_evcnt(struct acpicpu_softc *sc)
{
        struct acpicpu_pstate *ps;
        uint32_t i;

        for (i = 0; i < sc->sc_pstate_count; i++) {

                ps = &sc->sc_pstate[i];

                if (ps->ps_freq != 0)
                        evcnt_detach(&ps->ps_evcnt);
        }
}

void
acpicpu_pstate_start(device_t self)
{
        struct acpicpu_softc *sc = device_private(self);
        struct acpicpu_pstate *ps;
        uint32_t i;
        int rv;

        rv = acpicpu_md_pstate_start();

        if (rv != 0)
                goto fail;

        /*
         * Initialize the state to P0.
         */
        for (i = 0, rv = ENXIO; i < sc->sc_pstate_count; i++) {

                ps = &sc->sc_pstate[i];

                if (ps->ps_freq != 0) {
                        sc->sc_cold = false;
                        rv = acpicpu_pstate_set(sc, ps->ps_freq);
                        break;
                }
        }

        if (rv != 0)
                goto fail;

        return;

fail:
        sc->sc_flags &= ~ACPICPU_FLAG_P;
        aprint_error_dev(self, "failed to start P-states (err %d)\n", rv);
}

bool
acpicpu_pstate_suspend(device_t self)
{
        struct acpicpu_softc *sc = device_private(self);
        struct acpicpu_pstate *ps = NULL;
        int32_t i;

        mutex_enter(&sc->sc_mtx);
        acpicpu_pstate_reset(sc);
        mutex_exit(&sc->sc_mtx);

        if (acpicpu_pstate_saved != 0)
                return true;

        /*
         * Following design notes for Windows, we set the highest-numbered
         * (i.e. lowest-frequency) P-state when entering any of the system
         * sleep states. When resuming, the saved P-state will be restored.
         *
         *      Microsoft Corporation: Windows Native Processor
         *      Performance Control. Version 1.1a, November, 2002.
         */
        for (i = sc->sc_pstate_count - 1; i >= 0; i--) {

                if (sc->sc_pstate[i].ps_freq != 0) {
                        ps = &sc->sc_pstate[i];
                        break;
                }
        }

        if (__predict_false(ps == NULL))
                return true;

        mutex_enter(&sc->sc_mtx);
        acpicpu_pstate_saved = sc->sc_pstate_current;
        mutex_exit(&sc->sc_mtx);

        if (acpicpu_pstate_saved == ps->ps_freq)
                return true;

        (void)acpicpu_pstate_set(sc, ps->ps_freq);

        return true;
}

bool
acpicpu_pstate_resume(device_t self)
{
        struct acpicpu_softc *sc = device_private(self);

        if (acpicpu_pstate_saved != 0) {
                (void)acpicpu_pstate_set(sc, acpicpu_pstate_saved);
                acpicpu_pstate_saved = 0;
        }

        return true;
}

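/*
 * Callback used when the platform signals a change in the available
 * performance capabilities: re-evaluate the dynamic limits (_PPC,
 * and _PDL when passive cooling is active) and report whether the
 * maximum available frequency changed.
 */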
void
acpicpu_pstate_callback(void *aux)
{
        struct acpicpu_softc *sc;
        device_t self = aux;
        uint32_t old, new;

        sc = device_private(self);

        mutex_enter(&sc->sc_mtx);
        old = sc->sc_pstate_max;
        acpicpu_pstate_change(sc);
        new = sc->sc_pstate_max;
        mutex_exit(&sc->sc_mtx);

        if (old != new) {

                aprint_debug_dev(sc->sc_dev, "maximum frequency "
                    "changed from P%u (%u MHz) to P%u (%u MHz)\n",
                    old, sc->sc_pstate[old].ps_freq, new,
                    sc->sc_pstate[sc->sc_pstate_max].ps_freq);
#if 0
                /*
                 * If the maximum changed, proactively
                 * raise or lower the target frequency.
                 */
                (void)acpicpu_pstate_set(sc, sc->sc_pstate[new].ps_freq);

#endif
        }
}

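/*
 * Parse the _PSS package, which enumerates the supported
 * performance states. Entries that fail to parse or that are
 * not in the expected descending order of frequency are
 * invalidated by setting their frequency to zero.
 */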
ACPI_STATUS
acpicpu_pstate_pss(struct acpicpu_softc *sc)
{
        struct acpicpu_pstate *ps;
        ACPI_OBJECT *obj;
        ACPI_BUFFER buf;
        ACPI_STATUS rv;
        uint32_t count;
        uint32_t i, j;

        rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);

        if (ACPI_FAILURE(rv))
                return rv;

        obj = buf.Pointer;

        if (obj->Type != ACPI_TYPE_PACKAGE) {
                rv = AE_TYPE;
                goto out;
        }

        sc->sc_pstate_count = obj->Package.Count;

        if (sc->sc_pstate_count == 0) {
                rv = AE_NOT_EXIST;
                goto out;
        }

        if (sc->sc_pstate_count > ACPICPU_P_STATE_MAX) {
                rv = AE_LIMIT;
                goto out;
        }

        sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
            sizeof(struct acpicpu_pstate), KM_SLEEP);

        if (sc->sc_pstate == NULL) {
                rv = AE_NO_MEMORY;
                goto out;
        }

        for (count = i = 0; i < sc->sc_pstate_count; i++) {

                ps = &sc->sc_pstate[i];
                rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);

                if (ACPI_FAILURE(rv)) {
                        ps->ps_freq = 0;
                        continue;
                }

                for (j = 0; j < i; j++) {

                        if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
                                ps->ps_freq = 0;
                                break;
                        }
                }

                if (ps->ps_freq != 0)
                        count++;
        }

        rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
        if (buf.Pointer != NULL)
                ACPI_FREE(buf.Pointer);

        return rv;
}

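/*
 * Each _PSS entry is a package of six integers: the core
 * frequency (MHz), power consumption (mW), transition latency
 * (us), bus master latency (us), and the control and status
 * values used for the transition.
 */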
static ACPI_STATUS
acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
        ACPI_OBJECT *elm;
        int i;

        if (obj->Type != ACPI_TYPE_PACKAGE)
                return AE_TYPE;

        if (obj->Package.Count != 6)
                return AE_BAD_DATA;

        elm = obj->Package.Elements;

        for (i = 0; i < 6; i++) {

                if (elm[i].Type != ACPI_TYPE_INTEGER)
                        return AE_TYPE;

                if (elm[i].Integer.Value > UINT32_MAX)
                        return AE_AML_NUMERIC_OVERFLOW;
        }

        ps->ps_freq = elm[0].Integer.Value;
        ps->ps_power = elm[1].Integer.Value;
        ps->ps_latency = elm[2].Integer.Value;
        ps->ps_latency_bm = elm[3].Integer.Value;
        ps->ps_control = elm[4].Integer.Value;
        ps->ps_status = elm[5].Integer.Value;

        if (ps->ps_freq == 0 || ps->ps_freq > 9999)
                return AE_BAD_DECIMAL_CONSTANT;

        /*
         * The latency is typically around 10 usec
         * on Intel CPUs. Use that as the minimum.
         */
        if (ps->ps_latency < 10)
                ps->ps_latency = 10;

        return AE_OK;
}

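/*
 * Parse the extended _PSS ("XPSS") package. The layout mirrors
 * _PSS, but each entry additionally carries raw 64-bit control
 * and status values and masks; see acpicpu_pstate_xpss_add().
 * If found, the data replaces whatever _PSS provided.
 */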
static ACPI_STATUS
acpicpu_pstate_xpss(struct acpicpu_softc *sc)
{
        static const size_t size = sizeof(struct acpicpu_pstate);
        struct acpicpu_pstate *ps;
        ACPI_OBJECT *obj;
        ACPI_BUFFER buf;
        ACPI_STATUS rv;
        uint32_t count;
        uint32_t i, j;

        rv = acpi_eval_struct(sc->sc_node->ad_handle, "XPSS", &buf);

        if (ACPI_FAILURE(rv))
                return rv;

        obj = buf.Pointer;

        if (obj->Type != ACPI_TYPE_PACKAGE) {
                rv = AE_TYPE;
                goto out;
        }

        count = obj->Package.Count;

        if (count == 0) {
                rv = AE_NOT_EXIST;
                goto out;
        }

        if (count > ACPICPU_P_STATE_MAX) {
                rv = AE_LIMIT;
                goto out;
        }

        if (sc->sc_pstate != NULL)
                kmem_free(sc->sc_pstate, sc->sc_pstate_count * size);

        sc->sc_pstate = kmem_zalloc(count * size, KM_SLEEP);

        if (sc->sc_pstate == NULL) {
                rv = AE_NO_MEMORY;
                goto out;
        }

        sc->sc_pstate_count = count;

        for (count = i = 0; i < sc->sc_pstate_count; i++) {

                ps = &sc->sc_pstate[i];
                rv = acpicpu_pstate_xpss_add(ps, &obj->Package.Elements[i]);

                if (ACPI_FAILURE(rv)) {
                        ps->ps_freq = 0;
                        continue;
                }

                for (j = 0; j < i; j++) {

                        if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
                                ps->ps_freq = 0;
                                break;
                        }
                }

                if (ps->ps_freq != 0)
                        count++;
        }

        rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
        if (buf.Pointer != NULL)
                ACPI_FREE(buf.Pointer);

        return rv;
}

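/*
 * An XPSS entry contains eight elements: four integers with the
 * same meaning as in _PSS, followed by four 8-byte buffers for
 * the control value, status value, control mask, and status mask.
 */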
static ACPI_STATUS
acpicpu_pstate_xpss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
        static const size_t size = sizeof(uint64_t);
        ACPI_OBJECT *elm;
        int i;

        if (obj->Type != ACPI_TYPE_PACKAGE)
                return AE_TYPE;

        if (obj->Package.Count != 8)
                return AE_BAD_DATA;

        elm = obj->Package.Elements;

        for (i = 0; i < 4; i++) {

                if (elm[i].Type != ACPI_TYPE_INTEGER)
                        return AE_TYPE;

                if (elm[i].Integer.Value > UINT32_MAX)
                        return AE_AML_NUMERIC_OVERFLOW;
        }

        for (; i < 8; i++) {

                if (elm[i].Type != ACPI_TYPE_BUFFER)
                        return AE_TYPE;

                if (elm[i].Buffer.Length > size)
                        return AE_LIMIT;
        }

        ps->ps_freq = elm[0].Integer.Value;
        ps->ps_power = elm[1].Integer.Value;
        ps->ps_latency = elm[2].Integer.Value;
        ps->ps_latency_bm = elm[3].Integer.Value;

        if (ps->ps_freq == 0 || ps->ps_freq > 9999)
                return AE_BAD_DECIMAL_CONSTANT;

        (void)memcpy(&ps->ps_control, elm[4].Buffer.Pointer, size);
        (void)memcpy(&ps->ps_status, elm[5].Buffer.Pointer, size);

        (void)memcpy(&ps->ps_control_mask, elm[6].Buffer.Pointer, size);
        (void)memcpy(&ps->ps_status_mask, elm[7].Buffer.Pointer, size);

        /*
         * The latency is often defined to be
         * zero on AMD systems. Raise that to 1.
         */
        if (ps->ps_latency == 0)
                ps->ps_latency = 1;

        ps->ps_flags |= ACPICPU_FLAG_P_XPSS;

        return AE_OK;
}

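/*
 * Parse the _PCT package. It contains two buffers with generic
 * register descriptors for the performance control and status
 * registers, using either I/O or "fixed hardware" (FFH) access.
 */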
ACPI_STATUS
acpicpu_pstate_pct(struct acpicpu_softc *sc)
{
        static const size_t size = sizeof(struct acpicpu_reg);
        struct acpicpu_reg *reg[2];
        struct acpicpu_pstate *ps;
        ACPI_OBJECT *elm, *obj;
        ACPI_BUFFER buf;
        ACPI_STATUS rv;
        uint8_t width;
        uint32_t i;

        rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);

        if (ACPI_FAILURE(rv))
                return rv;

        obj = buf.Pointer;

        if (obj->Type != ACPI_TYPE_PACKAGE) {
                rv = AE_TYPE;
                goto out;
        }

        if (obj->Package.Count != 2) {
                rv = AE_LIMIT;
                goto out;
        }

        for (i = 0; i < 2; i++) {

                elm = &obj->Package.Elements[i];

                if (elm->Type != ACPI_TYPE_BUFFER) {
                        rv = AE_TYPE;
                        goto out;
                }

                if (size > elm->Buffer.Length) {
                        rv = AE_AML_BAD_RESOURCE_LENGTH;
                        goto out;
                }

                reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

                switch (reg[i]->reg_spaceid) {

                case ACPI_ADR_SPACE_SYSTEM_IO:

                        if (reg[i]->reg_addr == 0) {
                                rv = AE_AML_ILLEGAL_ADDRESS;
                                goto out;
                        }

                        width = reg[i]->reg_bitwidth;

                        if (width + reg[i]->reg_bitoffset > 32) {
                                rv = AE_AML_BAD_RESOURCE_VALUE;
                                goto out;
                        }

                        if (width != 8 && width != 16 && width != 32) {
                                rv = AE_AML_BAD_RESOURCE_VALUE;
                                goto out;
                        }

                        break;

                case ACPI_ADR_SPACE_FIXED_HARDWARE:

                        if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

                                if (reg[i]->reg_bitwidth != 64) {
                                        rv = AE_AML_BAD_RESOURCE_VALUE;
                                        goto out;
                                }

                                if (reg[i]->reg_bitoffset != 0) {
                                        rv = AE_AML_BAD_RESOURCE_VALUE;
                                        goto out;
                                }

                                break;
                        }

                        if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
                                rv = AE_SUPPORT;
                                goto out;
                        }

                        break;

                default:
                        rv = AE_AML_INVALID_SPACE_ID;
                        goto out;
                }
        }

        if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
                rv = AE_AML_INVALID_SPACE_ID;
                goto out;
        }

        (void)memcpy(&sc->sc_pstate_control, reg[0], size);
        (void)memcpy(&sc->sc_pstate_status, reg[1], size);

        if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
                goto out;

        /*
         * In XPSS the control address can not be zero,
         * but the status address may be. In this case,
         * comparable to T-states, we can ignore the status
         * check during the P-state (FFH) transition.
         */
        if (sc->sc_pstate_control.reg_addr == 0) {
                rv = AE_AML_BAD_RESOURCE_LENGTH;
                goto out;
        }

        /*
         * If XPSS is present, copy the MSR addresses
         * to the P-state structures for convenience.
         */
        for (i = 0; i < sc->sc_pstate_count; i++) {

                ps = &sc->sc_pstate[i];

                if (ps->ps_freq == 0)
                        continue;

                ps->ps_status_addr = sc->sc_pstate_status.reg_addr;
                ps->ps_control_addr = sc->sc_pstate_control.reg_addr;
        }

out:
        if (buf.Pointer != NULL)
                ACPI_FREE(buf.Pointer);

        return rv;
}

static int
acpicpu_pstate_max(struct acpicpu_softc *sc)
{
        ACPI_INTEGER val;
        ACPI_STATUS rv;

        /*
         * Evaluate the currently highest P-state that can be used.
         * If available, we can use either this state or any lower
         * power (i.e. higher numbered) state from the _PSS object.
         * Note that the return value must match the _OST parameter.
         */
        rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);

        if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

                if (sc->sc_pstate[val].ps_freq != 0) {
                        sc->sc_pstate_max = val;
                        return 0;
                }
        }

        return 1;
}

static int
acpicpu_pstate_min(struct acpicpu_softc *sc)
{
        ACPI_INTEGER val;
        ACPI_STATUS rv;

        /*
         * The _PDL object defines the minimum when passive cooling
         * is being performed. If available, we can use the returned
         * state or any higher power (i.e. lower numbered) state.
         */
        rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PDL", &val);

        if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

                if (sc->sc_pstate[val].ps_freq == 0)
                        return 1;

                if (val >= sc->sc_pstate_max) {
                        sc->sc_pstate_min = val;
                        return 0;
                }
        }

        return 1;
}

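/*
 * Re-evaluate the P-state limits and report the result of the
 * _PPC evaluation back to the firmware via _OST. The status
 * argument follows the convention noted in acpicpu_pstate_max().
 */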
static void
acpicpu_pstate_change(struct acpicpu_softc *sc)
{
        static ACPI_STATUS rv = AE_OK;
        ACPI_OBJECT_LIST arg;
        ACPI_OBJECT obj[2];

        acpicpu_pstate_reset(sc);

        arg.Count = 2;
        arg.Pointer = obj;

        obj[0].Type = ACPI_TYPE_INTEGER;
        obj[1].Type = ACPI_TYPE_INTEGER;

        obj[0].Integer.Value = ACPICPU_P_NOTIFY;
        obj[1].Integer.Value = acpicpu_pstate_max(sc);

        if (sc->sc_passive != false)
                (void)acpicpu_pstate_min(sc);

        if (ACPI_FAILURE(rv))
                return;

        rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
}

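/*
 * Reset the limits to the full range: P0 as the maximum
 * (highest frequency) and the last state as the minimum.
 */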
static void
acpicpu_pstate_reset(struct acpicpu_softc *sc)
{

        sc->sc_pstate_max = 0;
        sc->sc_pstate_min = sc->sc_pstate_count - 1;
}

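/*
 * Request control of P-states from the firmware by writing the
 * value from the FADT PSTATE_CNT field to the SMI command port,
 * provided that both are defined.
 */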
static void
acpicpu_pstate_bios(void)
{
        const uint8_t val = AcpiGbl_FADT.PstateControl;
        const uint32_t addr = AcpiGbl_FADT.SmiCommand;

        if (addr == 0 || val == 0)
                return;

        (void)AcpiOsWritePort(addr, val, 8);
}

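/*
 * Return the current frequency in MHz. The cached value is used
 * when available; otherwise the state is read either through the
 * machine-dependent backend (FFH) or from the status register
 * defined by _PCT (I/O).
 */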
int
acpicpu_pstate_get(struct acpicpu_softc *sc, uint32_t *freq)
{
        const uint8_t method = sc->sc_pstate_control.reg_spaceid;
        struct acpicpu_pstate *ps = NULL;
        uint32_t i, val = 0;
        uint64_t addr;
        uint8_t width;
        int rv;

        if (sc->sc_cold != false) {
                rv = EBUSY;
                goto fail;
        }

        if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
                rv = ENODEV;
                goto fail;
        }

        mutex_enter(&sc->sc_mtx);

        if (sc->sc_pstate_current != ACPICPU_P_STATE_UNKNOWN) {
                *freq = sc->sc_pstate_current;
                mutex_exit(&sc->sc_mtx);
                return 0;
        }

        mutex_exit(&sc->sc_mtx);

        switch (method) {

        case ACPI_ADR_SPACE_FIXED_HARDWARE:

                rv = acpicpu_md_pstate_get(sc, freq);

                if (rv != 0)
                        goto fail;

                break;

        case ACPI_ADR_SPACE_SYSTEM_IO:

                addr = sc->sc_pstate_status.reg_addr;
                width = sc->sc_pstate_status.reg_bitwidth;

                (void)AcpiOsReadPort(addr, &val, width);

                if (val == 0) {
                        rv = EIO;
                        goto fail;
                }

                for (i = 0; i < sc->sc_pstate_count; i++) {

                        if (sc->sc_pstate[i].ps_freq == 0)
                                continue;

                        if (val == sc->sc_pstate[i].ps_status) {
                                ps = &sc->sc_pstate[i];
                                break;
                        }
                }

                if (__predict_false(ps == NULL)) {
                        rv = EIO;
                        goto fail;
                }

                *freq = ps->ps_freq;
                break;

        default:
                rv = ENOTTY;
                goto fail;
        }

        mutex_enter(&sc->sc_mtx);
        sc->sc_pstate_current = *freq;
        mutex_exit(&sc->sc_mtx);

        return 0;

fail:
        aprint_error_dev(sc->sc_dev, "failed "
            "to get frequency (err %d)\n", rv);

        mutex_enter(&sc->sc_mtx);
        *freq = sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
        mutex_exit(&sc->sc_mtx);

        return rv;
}

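/*
 * Set the frequency to the given value in MHz. The request must
 * fall within the current limits and match one of the states from
 * _PSS. For I/O-based transitions the status register is polled
 * until the expected value appears or the retry limit is reached.
 */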
int
acpicpu_pstate_set(struct acpicpu_softc *sc, uint32_t freq)
{
        const uint8_t method = sc->sc_pstate_control.reg_spaceid;
        struct acpicpu_pstate *ps = NULL;
        uint32_t i, val;
        uint64_t addr;
        uint8_t width;
        int rv;

        if (sc->sc_cold != false) {
                rv = EBUSY;
                goto fail;
        }

        if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
                rv = ENODEV;
                goto fail;
        }

        mutex_enter(&sc->sc_mtx);

        for (i = sc->sc_pstate_max; i <= sc->sc_pstate_min; i++) {

                if (sc->sc_pstate[i].ps_freq == 0)
                        continue;

                if (sc->sc_pstate[i].ps_freq == freq) {
                        ps = &sc->sc_pstate[i];
                        break;
                }
        }

        mutex_exit(&sc->sc_mtx);

        if (__predict_false(ps == NULL)) {
                rv = EINVAL;
                goto fail;
        }

        switch (method) {

        case ACPI_ADR_SPACE_FIXED_HARDWARE:

                rv = acpicpu_md_pstate_set(ps);

                if (rv != 0)
                        goto fail;

                break;

        case ACPI_ADR_SPACE_SYSTEM_IO:

                addr = sc->sc_pstate_control.reg_addr;
                width = sc->sc_pstate_control.reg_bitwidth;

                (void)AcpiOsWritePort(addr, ps->ps_control, width);

                addr = sc->sc_pstate_status.reg_addr;
                width = sc->sc_pstate_status.reg_bitwidth;

                /*
                 * Some systems take longer to respond
                 * than the reported worst-case latency.
                 */
                for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

                        (void)AcpiOsReadPort(addr, &val, width);

                        if (val == ps->ps_status)
                                break;

                        DELAY(ps->ps_latency);
                }

                if (i == ACPICPU_P_STATE_RETRY) {
                        rv = EAGAIN;
                        goto fail;
                }

                break;

        default:
                rv = ENOTTY;
                goto fail;
        }

        mutex_enter(&sc->sc_mtx);
        ps->ps_evcnt.ev_count++;
        sc->sc_pstate_current = freq;
        mutex_exit(&sc->sc_mtx);

        return 0;

fail:
        aprint_error_dev(sc->sc_dev, "failed to set "
            "frequency to %u (err %d)\n", freq, rv);

        mutex_enter(&sc->sc_mtx);
        sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
        mutex_exit(&sc->sc_mtx);

        return rv;
}