/* $NetBSD: acpi_cpu_pstate.c,v 1.32 2010/08/20 06:36:40 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen (at) iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.32 2010/08/20 06:36:40 jruoho Exp $");

#include <sys/param.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/once.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_pstate")

static void		acpicpu_pstate_attach_print(struct acpicpu_softc *);
static void		acpicpu_pstate_attach_evcnt(struct acpicpu_softc *);
static void		acpicpu_pstate_detach_evcnt(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_pss(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_pss_add(struct acpicpu_pstate *,
				ACPI_OBJECT *);
static ACPI_STATUS	acpicpu_pstate_xpss(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_xpss_add(struct acpicpu_pstate *,
				ACPI_OBJECT *);
static ACPI_STATUS	acpicpu_pstate_pct(struct acpicpu_softc *);
static int		acpicpu_pstate_max(struct acpicpu_softc *);
static int		acpicpu_pstate_min(struct acpicpu_softc *);
static void		acpicpu_pstate_change(struct acpicpu_softc *);
static void		acpicpu_pstate_reset(struct acpicpu_softc *);
static void		acpicpu_pstate_bios(void);

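/*
 * The frequency (in MHz) that was current before suspending;
 * zero indicates that no P-state has been saved.
 */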
static uint32_t acpicpu_pstate_saved = 0;

void
acpicpu_pstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	const char *str;
	ACPI_HANDLE tmp;
	ACPI_STATUS rv;

	rv = acpicpu_pstate_pss(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PSS";
		goto fail;
	}

	/*
	 * Check the availability of extended _PSS.
	 * If present, this will override the data.
	 * Note that XPSS can not be used on Intel
	 * systems where _PDC or _OSC may be used.
	 */
	if (sc->sc_cap == 0) {

		rv = acpicpu_pstate_xpss(sc);

		if (ACPI_SUCCESS(rv))
			sc->sc_flags |= ACPICPU_FLAG_P_XPSS;

		if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
			str = "XPSS";
			goto fail;
		}
	}

	rv = acpicpu_pstate_pct(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PCT";
		goto fail;
	}

	/*
	 * The ACPI 3.0 and 4.0 specifications mandate three
	 * objects for P-states: _PSS, _PCT, and _PPC. A less
	 * strict wording is however used in the earlier 2.0
	 * standard, and some systems conforming to ACPI 2.0
	 * do not have _PPC, the method for dynamic maximum.
	 */
	rv = AcpiGetHandle(sc->sc_node->ad_handle, "_PPC", &tmp);

	if (ACPI_FAILURE(rv))
		aprint_debug_dev(self, "_PPC missing\n");

	/*
	 * Employ the XPSS structure by filling
	 * it with MD information required for FFH.
	 */
	rv = acpicpu_md_pstate_pss(sc);

	if (rv != 0) {
		rv = AE_SUPPORT;
		goto fail;
	}

	sc->sc_flags |= ACPICPU_FLAG_P;

	acpicpu_pstate_bios();
	acpicpu_pstate_reset(sc);
	acpicpu_pstate_attach_evcnt(sc);
	acpicpu_pstate_attach_print(sc);

	return;

fail:
	switch (rv) {

	case AE_NOT_FOUND:
		return;

	case AE_SUPPORT:
		aprint_verbose_dev(sc->sc_dev, "P-states not supported\n");
		return;

	default:
		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "%s: %s\n", str, AcpiFormatException(rv));
	}
}

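/*
 * Print the usable P-states to the debug console. This is
 * done only once, for the first CPU that completes attach.
 */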
static void
acpicpu_pstate_attach_print(struct acpicpu_softc *sc)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps;
	static bool once = false;
	const char *str;
	uint32_t i;

	if (once != false)
		return;

	str = (method != ACPI_ADR_SPACE_SYSTEM_IO) ? "FFH" : "I/O";

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		aprint_debug_dev(sc->sc_dev, "P%u: %3s, "
		    "lat %3u us, pow %5u mW, %4u MHz\n", i, str,
		    ps->ps_latency, ps->ps_power, ps->ps_freq);
	}

	once = true;
}

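/*
 * Attach an event counter for each usable P-state; the counters
 * are bumped in acpicpu_pstate_set() on successful transitions.
 */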
static void
acpicpu_pstate_attach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	uint32_t i;

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}
}

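/*
 * Detach the P-state support: stop the MD backend once for all
 * CPUs, detach the event counters, and release the state array.
 */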
int
acpicpu_pstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	size_t size;
	int rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
		return 0;

	rv = RUN_ONCE(&once_detach, acpicpu_md_pstate_stop);

	if (rv != 0)
		return rv;

	/*
	 * Detach the event counters before the backing
	 * P-state array is released; they reference it.
	 */
	acpicpu_pstate_detach_evcnt(sc);

	size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);

	if (sc->sc_pstate != NULL) {
		kmem_free(sc->sc_pstate, size);
		sc->sc_pstate = NULL;
	}

	sc->sc_flags &= ~ACPICPU_FLAG_P;

	return 0;
}

static void
acpicpu_pstate_detach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	uint32_t i;

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}
}

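/*
 * Start the P-state support: initialize the machine-dependent
 * backend and set the frequency to the highest-performance
 * state (P0).
 */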
void
acpicpu_pstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps;
	uint32_t i;
	int rv;

	rv = acpicpu_md_pstate_start();

	if (rv != 0)
		goto fail;

	/*
	 * Initialize the state to P0.
	 */
	for (i = 0, rv = ENXIO; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0) {
			sc->sc_cold = false;
			rv = acpicpu_pstate_set(sc, ps->ps_freq);
			break;
		}
	}

	if (rv != 0)
		goto fail;

	return;

fail:
	sc->sc_flags &= ~ACPICPU_FLAG_P;

	if (rv == EEXIST) {
		aprint_error_dev(self, "driver conflicts with existing one\n");
		return;
	}

	aprint_error_dev(self, "failed to start P-states (err %d)\n", rv);
}

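/*
 * Prepare for suspend: reset the P-state limits, remember the
 * current frequency, and drop to the lowest-performance (highest
 * numbered) P-state, as recommended for system sleep.
 */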
bool
acpicpu_pstate_suspend(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps = NULL;
	int32_t i;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_reset(sc);
	mutex_exit(&sc->sc_mtx);

	if (acpicpu_pstate_saved != 0)
		return true;

	/*
	 * Following design notes for Windows, we set the highest
	 * P-state when entering any of the system sleep states.
	 * When resuming, the saved P-state will be restored.
	 *
	 * Microsoft Corporation: Windows Native Processor
	 * Performance Control. Version 1.1a, November, 2002.
	 */
	for (i = sc->sc_pstate_count - 1; i >= 0; i--) {

		if (sc->sc_pstate[i].ps_freq != 0) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	if (__predict_false(ps == NULL))
		return true;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_saved = sc->sc_pstate_current;
	mutex_exit(&sc->sc_mtx);

	if (acpicpu_pstate_saved == ps->ps_freq)
		return true;

	(void)acpicpu_pstate_set(sc, ps->ps_freq);

	return true;
}

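/*
 * Restore the P-state that was active before suspending.
 */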
bool
acpicpu_pstate_resume(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	if (acpicpu_pstate_saved != 0) {
		(void)acpicpu_pstate_set(sc, acpicpu_pstate_saved);
		acpicpu_pstate_saved = 0;
	}

	return true;
}

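/*
 * Handle a notification (typically a _PPC change): re-evaluate
 * the P-state limits and log if the dynamic maximum changed.
 */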
void
acpicpu_pstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;
	uint32_t old, new;

	sc = device_private(self);

	mutex_enter(&sc->sc_mtx);
	old = sc->sc_pstate_max;
	acpicpu_pstate_change(sc);
	new = sc->sc_pstate_max;
	mutex_exit(&sc->sc_mtx);

	if (old != new) {

		aprint_debug_dev(sc->sc_dev, "maximum frequency "
		    "changed from P%u (%u MHz) to P%u (%u MHz)\n",
		    old, sc->sc_pstate[old].ps_freq, new,
		    sc->sc_pstate[new].ps_freq);
#if 0
		/*
		 * If the maximum changed, proactively
		 * raise or lower the target frequency.
		 */
		(void)acpicpu_pstate_set(sc, sc->sc_pstate[new].ps_freq);

#endif
	}
}

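/*
 * Parse the _PSS package, which enumerates the available P-states.
 * Entries that fail validation or that are not in the expected
 * order of decreasing frequency are disabled by clearing ps_freq.
 */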
ACPI_STATUS
acpicpu_pstate_pss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	sc->sc_pstate_count = obj->Package.Count;

	if (sc->sc_pstate_count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	if (sc->sc_pstate_count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
	    sizeof(struct acpicpu_pstate), KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			ps->ps_freq = 0;
			continue;
		}

		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

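/*
 * Validate a single _PSS package element and fill in the
 * corresponding P-state: core frequency (MHz), power (mW),
 * transition and bus-master latencies (us), and the control
 * and status values used for the transition.
 */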
static ACPI_STATUS
acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 6)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 6; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	ps->ps_freq = elm[0].Integer.Value;
	ps->ps_power = elm[1].Integer.Value;
	ps->ps_latency = elm[2].Integer.Value;
	ps->ps_latency_bm = elm[3].Integer.Value;
	ps->ps_control = elm[4].Integer.Value;
	ps->ps_status = elm[5].Integer.Value;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	/*
	 * The latency is typically around 10 usec
	 * on Intel CPUs. Use that as the minimum.
	 */
	if (ps->ps_latency < 10)
		ps->ps_latency = 10;

	return AE_OK;
}

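/*
 * Parse the XPSS package, a Microsoft extension that mirrors _PSS
 * but provides the control and status values as 64-bit buffers,
 * along with the corresponding bit masks. If found, the data
 * gathered from _PSS is discarded and rebuilt from XPSS.
 */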
static ACPI_STATUS
acpicpu_pstate_xpss(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_pstate);
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "XPSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	count = obj->Package.Count;

	if (count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	if (count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	if (sc->sc_pstate != NULL)
		kmem_free(sc->sc_pstate, sc->sc_pstate_count * size);

	sc->sc_pstate = kmem_zalloc(count * size, KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	sc->sc_pstate_count = count;

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_xpss_add(ps, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			ps->ps_freq = 0;
			continue;
		}

		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

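/*
 * Validate a single XPSS package element: four integers as in
 * _PSS, followed by four 64-bit buffers holding the control and
 * status values and their masks.
 */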
static ACPI_STATUS
acpicpu_pstate_xpss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	static const size_t size = sizeof(uint64_t);
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 8)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 4; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	for (; i < 8; i++) {

		if (elm[i].Type != ACPI_TYPE_BUFFER)
			return AE_TYPE;

		/*
		 * Require full 64-bit buffers; we copy
		 * 'size' bytes out of each one below.
		 */
		if (elm[i].Buffer.Length != size)
			return AE_LIMIT;
	}

	ps->ps_freq = elm[0].Integer.Value;
	ps->ps_power = elm[1].Integer.Value;
	ps->ps_latency = elm[2].Integer.Value;
	ps->ps_latency_bm = elm[3].Integer.Value;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	(void)memcpy(&ps->ps_control, elm[4].Buffer.Pointer, size);
	(void)memcpy(&ps->ps_status, elm[5].Buffer.Pointer, size);

	(void)memcpy(&ps->ps_control_mask, elm[6].Buffer.Pointer, size);
	(void)memcpy(&ps->ps_status_mask, elm[7].Buffer.Pointer, size);

	/*
	 * The latency is often defined to be
	 * zero on AMD systems. Raise that to 1.
	 */
	if (ps->ps_latency == 0)
		ps->ps_latency = 1;

	ps->ps_flags |= ACPICPU_FLAG_P_XPSS;

	return AE_OK;
}

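/*
 * Parse _PCT, which describes the performance control and status
 * registers. These are either system I/O ports or, with FFH
 * (functional fixed hardware), typically MSRs accessed by the
 * machine-dependent code.
 */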
ACPI_STATUS
acpicpu_pstate_pct(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_reg);
	struct acpicpu_reg *reg[2];
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint8_t width;
	uint32_t i;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 2) {
		rv = AE_LIMIT;
		goto out;
	}

	for (i = 0; i < 2; i++) {

		elm = &obj->Package.Elements[i];

		if (elm->Type != ACPI_TYPE_BUFFER) {
			rv = AE_TYPE;
			goto out;
		}

		if (size > elm->Buffer.Length) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

		switch (reg[i]->reg_spaceid) {

		case ACPI_ADR_SPACE_SYSTEM_IO:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

			width = reg[i]->reg_bitwidth;

			if (width + reg[i]->reg_bitoffset > 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			if (width != 8 && width != 16 && width != 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			break;

		case ACPI_ADR_SPACE_FIXED_HARDWARE:

			if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

				if (reg[i]->reg_bitwidth != 64) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				if (reg[i]->reg_bitoffset != 0) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				break;
			}

			if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}

			break;

		default:
			rv = AE_AML_INVALID_SPACE_ID;
			goto out;
		}
	}

	if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	(void)memcpy(&sc->sc_pstate_control, reg[0], size);
	(void)memcpy(&sc->sc_pstate_status, reg[1], size);

	if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
		goto out;

	/*
	 * In XPSS the control address can not be zero,
	 * but the status address may be. In this case,
	 * comparable to T-states, we can ignore the status
	 * check during the P-state (FFH) transition.
	 */
	if (sc->sc_pstate_control.reg_addr == 0) {
		rv = AE_AML_BAD_RESOURCE_LENGTH;
		goto out;
	}

	/*
	 * If XPSS is present, copy the MSR addresses
	 * to the P-state structures for convenience.
	 */
	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		ps->ps_status_addr = sc->sc_pstate_status.reg_addr;
		ps->ps_control_addr = sc->sc_pstate_control.reg_addr;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static int
acpicpu_pstate_max(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * Evaluate the currently highest P-state that can be used.
	 * If available, we can use either this state or any lower
	 * power (i.e. higher numbered) state from the _PSS object.
	 * Note that the return value must match the _OST parameter.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq != 0) {
			sc->sc_pstate_max = val;
			return 0;
		}
	}

	return 1;
}

static int
acpicpu_pstate_min(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * The _PDL object defines the minimum when passive cooling
	 * is being performed. If available, we can use the returned
	 * state or any higher power (i.e. lower numbered) state.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PDL", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq == 0)
			return 1;

		if (val >= sc->sc_pstate_max) {
			sc->sc_pstate_min = val;
			return 0;
		}
	}

	return 1;
}

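/*
 * Re-evaluate the dynamic P-state limits and report the result
 * of processing the notification back to the platform via _OST.
 */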
static void
acpicpu_pstate_change(struct acpicpu_softc *sc)
{
	static ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[2];

	acpicpu_pstate_reset(sc);

	arg.Count = 2;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_INTEGER;
	obj[1].Type = ACPI_TYPE_INTEGER;

	obj[0].Integer.Value = ACPICPU_P_NOTIFY;
	obj[1].Integer.Value = acpicpu_pstate_max(sc);

	if (sc->sc_passive != false)
		(void)acpicpu_pstate_min(sc);

	if (ACPI_FAILURE(rv))
		return;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
}

static void
acpicpu_pstate_reset(struct acpicpu_softc *sc)
{

	sc->sc_pstate_max = 0;
	sc->sc_pstate_min = sc->sc_pstate_count - 1;
}

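/*
 * Request control of P-states from the firmware (SMM) by writing
 * the FADT-specified value to the SMI command port, if any.
 */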
static void
acpicpu_pstate_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.PstateControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

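/*
 * Read the current frequency (in MHz), using the cached value
 * when available, and otherwise either the MD backend (FFH) or
 * the _PCT status register (I/O).
 */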
int
acpicpu_pstate_get(struct acpicpu_softc *sc, uint32_t *freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val = 0;
	uint64_t addr;
	uint8_t width;
	int rv;

	if (sc->sc_cold != false) {
		rv = EBUSY;
		goto fail;
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_pstate_current != ACPICPU_P_STATE_UNKNOWN) {
		*freq = sc->sc_pstate_current;
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	mutex_exit(&sc->sc_mtx);

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_get(sc, freq);

		if (rv != 0)
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		(void)AcpiOsReadPort(addr, &val, width);

		if (val == 0) {
			rv = EIO;
			goto fail;
		}

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			if (val == sc->sc_pstate[i].ps_status) {
				ps = &sc->sc_pstate[i];
				break;
			}
		}

		if (__predict_false(ps == NULL)) {
			rv = EIO;
			goto fail;
		}

		*freq = ps->ps_freq;
		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = *freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed "
	    "to get frequency (err %d)\n", rv);

	mutex_enter(&sc->sc_mtx);
	*freq = sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}

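/*
 * Switch to the P-state that matches the given frequency (MHz),
 * provided it lies within the current dynamic limits. With the
 * I/O method, poll the status register until the transition is
 * reported complete.
 */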
int
acpicpu_pstate_set(struct acpicpu_softc *sc, uint32_t freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val;
	uint64_t addr;
	uint8_t width;
	int rv;

	if (sc->sc_cold != false) {
		rv = EBUSY;
		goto fail;
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_pstate_current == freq) {
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	for (i = sc->sc_pstate_max; i <= sc->sc_pstate_min; i++) {

		if (sc->sc_pstate[i].ps_freq == 0)
			continue;

		if (sc->sc_pstate[i].ps_freq == freq) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	mutex_exit(&sc->sc_mtx);

	if (__predict_false(ps == NULL)) {
		rv = EINVAL;
		goto fail;
	}

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_set(ps);

		if (rv != 0)
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_control.reg_addr;
		width = sc->sc_pstate_control.reg_bitwidth;

		(void)AcpiOsWritePort(addr, ps->ps_control, width);

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		/*
		 * Some systems take longer to respond
		 * than the reported worst-case latency.
		 */
		for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

			(void)AcpiOsReadPort(addr, &val, width);

			if (val == ps->ps_status)
				break;

			DELAY(ps->ps_latency);
		}

		if (i == ACPICPU_P_STATE_RETRY) {
			rv = EAGAIN;
			goto fail;
		}

		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	ps->ps_evcnt.ev_count++;
	sc->sc_pstate_current = freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed to set "
	    "frequency to %u (err %d)\n", freq, rv);

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}