/* $NetBSD: acpi_cpu_pstate.c,v 1.45.2.4 2011/04/21 01:41:45 rmind Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen (at) iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.45.2.4 2011/04/21 01:41:45 rmind Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/once.h>
#include <sys/xcall.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_pstate")

static ACPI_STATUS	acpicpu_pstate_pss(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_pss_add(struct acpicpu_pstate *,
			    ACPI_OBJECT *);
static ACPI_STATUS	acpicpu_pstate_xpss(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_xpss_add(struct acpicpu_pstate *,
			    ACPI_OBJECT *);
static ACPI_STATUS	acpicpu_pstate_pct(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_dep(struct acpicpu_softc *);
static int		acpicpu_pstate_max(struct acpicpu_softc *);
static int		acpicpu_pstate_min(struct acpicpu_softc *);
static void		acpicpu_pstate_change(struct acpicpu_softc *);
static void		acpicpu_pstate_reset(struct acpicpu_softc *);
static void		acpicpu_pstate_bios(void);
static void		acpicpu_pstate_set_xcall(void *, void *);

extern struct acpicpu_softc **acpicpu_sc;

void
acpicpu_pstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	const char *str;
	ACPI_HANDLE tmp;
	ACPI_STATUS rv;

	rv = acpicpu_pstate_pss(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PSS";
		goto fail;
	}

	/*
	 * Append additional information from the extended _PSS,
	 * if available. Note that XPSS can not be used on Intel
	 * systems that use either _PDC or _OSC. From the XPSS
	 * method specification:
	 *
	 *	"The platform must not require the use of the
	 *	 optional _PDC or _OSC methods to coordinate
	 *	 between the operating system and firmware for
	 *	 the purposes of enabling specific processor
	 *	 power management features or implementations."
	 */
	if (sc->sc_cap == 0) {

		rv = acpicpu_pstate_xpss(sc);

		if (ACPI_SUCCESS(rv))
			sc->sc_flags |= ACPICPU_FLAG_P_XPSS;
	}

	rv = acpicpu_pstate_pct(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PCT";
		goto fail;
	}

	/*
	 * The ACPI 3.0 and 4.0 specifications mandate three
	 * objects for P-states: _PSS, _PCT, and _PPC. A less
	 * strict wording is however used in the earlier 2.0
	 * standard, and some systems conforming to ACPI 2.0
	 * do not have _PPC, the method for dynamic maximum.
	 */
	rv = AcpiGetHandle(sc->sc_node->ad_handle, "_PPC", &tmp);

	if (ACPI_FAILURE(rv))
		aprint_debug_dev(self, "_PPC missing\n");

	/*
	 * Carry out MD initialization.
	 */
	rv = acpicpu_md_pstate_init(sc);

	if (rv != 0) {
		rv = AE_SUPPORT;
		goto fail;
	}

	/*
	 * Query the optional _PSD.
	 */
	rv = acpicpu_pstate_dep(sc);

	if (ACPI_SUCCESS(rv))
		sc->sc_flags |= ACPICPU_FLAG_P_DEP;

	sc->sc_flags |= ACPICPU_FLAG_P;

	acpicpu_pstate_bios();
	acpicpu_pstate_reset(sc);

	return;

fail:
	switch (rv) {

	case AE_NOT_FOUND:
		return;

	case AE_SUPPORT:
		aprint_verbose_dev(self, "P-states not supported\n");
		return;

	default:
		aprint_error_dev(self, "failed to evaluate "
		    "%s: %s\n", str, AcpiFormatException(rv));
	}
}

int
acpicpu_pstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	size_t size;
	int rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
		return 0;

	rv = RUN_ONCE(&once_detach, acpicpu_md_pstate_stop);

	if (rv != 0)
		return rv;

	size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);

	if (sc->sc_pstate != NULL)
		kmem_free(sc->sc_pstate, size);

	sc->sc_flags &= ~ACPICPU_FLAG_P;

	return 0;
}

void
acpicpu_pstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps;
	uint32_t i;
	int rv;

	rv = acpicpu_md_pstate_start(sc);

	if (rv != 0)
		goto fail;

	/*
	 * Initialize the states to P0.
	 */
	for (i = 0, rv = ENXIO; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0) {
			acpicpu_pstate_set(sc->sc_ci, ps->ps_freq);
			return;
		}
	}

fail:
	sc->sc_flags &= ~ACPICPU_FLAG_P;
	aprint_error_dev(self, "failed to start P-states (err %d)\n", rv);
}

void
acpicpu_pstate_suspend(void *aux)
{
	struct acpicpu_pstate *ps = NULL;
	struct acpicpu_softc *sc;
	struct cpu_info *ci;
	device_t self = aux;
	uint64_t xc;
	int32_t i;

	sc = device_private(self);
	ci = sc->sc_ci;

	/*
	 * Reset any dynamic limits.
	 */
	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_reset(sc);

	/*
	 * Following design notes for Windows, we set the highest-numbered
	 * (i.e. lowest-frequency) P-state when entering any of the system
	 * sleep states. When resuming, the saved P-state is restored.
	 *
	 *	Microsoft Corporation: Windows Native Processor
	 *	Performance Control. Version 1.1a, November, 2002.
	 */
	sc->sc_pstate_saved = sc->sc_pstate_current;

	for (i = sc->sc_pstate_count - 1; i >= 0; i--) {

		if (sc->sc_pstate[i].ps_freq != 0) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	if (__predict_false(ps == NULL)) {
		mutex_exit(&sc->sc_mtx);
		return;
	}

	if (sc->sc_pstate_saved == ps->ps_freq) {
		mutex_exit(&sc->sc_mtx);
		return;
	}

	mutex_exit(&sc->sc_mtx);

	xc = xc_unicast(0, acpicpu_pstate_set_xcall, &ps->ps_freq, NULL, ci);
	xc_wait(xc);
}

void
acpicpu_pstate_resume(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;
	uint32_t freq;
	uint64_t xc;

	sc = device_private(self);
	freq = sc->sc_pstate_saved;

	xc = xc_unicast(0, acpicpu_pstate_set_xcall, &freq, NULL, sc->sc_ci);
	xc_wait(xc);
}

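/*
 * Callback for notifications about a change in the performance
 * limits (typically a _PPC change). The dynamic limits are
 * re-evaluated and the frequency is adjusted accordingly; the
 * saved frequency is restored once it again fits within the
 * new maximum.
 */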
void
acpicpu_pstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;
	uint32_t freq;
	uint64_t xc;

	sc = device_private(self);

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_change(sc);

	freq = sc->sc_pstate[sc->sc_pstate_max].ps_freq;

	if (sc->sc_pstate_saved == 0)
		sc->sc_pstate_saved = sc->sc_pstate_current;

	if (sc->sc_pstate_saved <= freq) {
		freq = sc->sc_pstate_saved;
		sc->sc_pstate_saved = 0;
	}

	mutex_exit(&sc->sc_mtx);

	xc = xc_unicast(0, acpicpu_pstate_set_xcall, &freq, NULL, sc->sc_ci);
	xc_wait(xc);
}

ACPI_STATUS
acpicpu_pstate_pss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	sc->sc_pstate_count = obj->Package.Count;

	if (sc->sc_pstate_count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	if (sc->sc_pstate_count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
	    sizeof(struct acpicpu_pstate), KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			aprint_error_dev(sc->sc_dev, "failed to add "
			    "P-state: %s\n", AcpiFormatException(rv));
			ps->ps_freq = 0;
			continue;
		}

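		/*
		 * The _PSS entries are expected to be sorted in
		 * descending order of frequency. Reject duplicate
		 * and out-of-order entries by marking their
		 * frequency as unusable.
		 */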
		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

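/*
 * Parse a single _PSS entry. Per the ACPI specification, each
 * entry is a package of six integers:
 *
 *	{ CoreFrequency,	; in MHz
 *	  Power,		; in mW
 *	  Latency,		; transition latency, in microseconds
 *	  BusMasterLatency,	; in microseconds
 *	  Control,		; value written to enter the state
 *	  Status }		; value read back to verify the state
 */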
static ACPI_STATUS
acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 6)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 6; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	ps->ps_freq = elm[0].Integer.Value;
	ps->ps_power = elm[1].Integer.Value;
	ps->ps_latency = elm[2].Integer.Value;
	ps->ps_latency_bm = elm[3].Integer.Value;
	ps->ps_control = elm[4].Integer.Value;
	ps->ps_status = elm[5].Integer.Value;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	if (ps->ps_latency == 0 || ps->ps_latency > 1000)
		ps->ps_latency = 1;

	return AE_OK;
}

static ACPI_STATUS
acpicpu_pstate_xpss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i = 0;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "XPSS", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != sc->sc_pstate_count) {
		rv = AE_LIMIT;
		goto out;
	}

	while (i < sc->sc_pstate_count) {

		ps = &sc->sc_pstate[i];
		acpicpu_pstate_xpss_add(ps, &obj->Package.Elements[i]);

		i++;
	}

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "XPSS: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

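/*
 * Parse a single XPSS entry. The extended package contains eight
 * elements: the first four are integers that mirror the frequency,
 * power, and latency fields of _PSS, and the remaining four are
 * 8-byte buffers holding the 64-bit control value, status value,
 * control mask, and status mask.
 */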
static ACPI_STATUS
acpicpu_pstate_xpss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 8)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 4; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	for (; i < 8; i++) {

		if (elm[i].Type != ACPI_TYPE_BUFFER)
			return AE_TYPE;

		if (elm[i].Buffer.Length != 8)
			return AE_LIMIT;
	}

	/*
	 * Only overwrite the elements that were
	 * not available from the conventional _PSS.
	 */
	if (ps->ps_freq == 0)
		ps->ps_freq = elm[0].Integer.Value;

	if (ps->ps_power == 0)
		ps->ps_power = elm[1].Integer.Value;

	if (ps->ps_latency == 0)
		ps->ps_latency = elm[2].Integer.Value;

	if (ps->ps_latency_bm == 0)
		ps->ps_latency_bm = elm[3].Integer.Value;

	if (ps->ps_control == 0)
		ps->ps_control = ACPI_GET64(elm[4].Buffer.Pointer);

	if (ps->ps_status == 0)
		ps->ps_status = ACPI_GET64(elm[5].Buffer.Pointer);

	if (ps->ps_control_mask == 0)
		ps->ps_control_mask = ACPI_GET64(elm[6].Buffer.Pointer);

	if (ps->ps_status_mask == 0)
		ps->ps_status_mask = ACPI_GET64(elm[7].Buffer.Pointer);

	ps->ps_flags |= ACPICPU_FLAG_P_XPSS;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	if (ps->ps_latency == 0 || ps->ps_latency > 1000)
		ps->ps_latency = 1;

	return AE_OK;
}

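/*
 * Parse _PCT, which declares the performance control and status
 * registers as a package of two register buffers. Only the
 * SystemIO and FFixedHW (functional fixed hardware) address
 * spaces are accepted here.
 */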
ACPI_STATUS
acpicpu_pstate_pct(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_reg);
	struct acpicpu_reg *reg[2];
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint8_t width;
	uint32_t i;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 2) {
		rv = AE_LIMIT;
		goto out;
	}

	for (i = 0; i < 2; i++) {

		elm = &obj->Package.Elements[i];

		if (elm->Type != ACPI_TYPE_BUFFER) {
			rv = AE_TYPE;
			goto out;
		}

		if (size > elm->Buffer.Length) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

		switch (reg[i]->reg_spaceid) {

		case ACPI_ADR_SPACE_SYSTEM_IO:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

			width = reg[i]->reg_bitwidth;

			if (width + reg[i]->reg_bitoffset > 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			if (width != 8 && width != 16 && width != 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			break;

		case ACPI_ADR_SPACE_FIXED_HARDWARE:

			if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

				if (reg[i]->reg_bitwidth != 64) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				if (reg[i]->reg_bitoffset != 0) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				break;
			}

			if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}

			break;

		default:
			rv = AE_AML_INVALID_SPACE_ID;
			goto out;
		}
	}

	if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	(void)memcpy(&sc->sc_pstate_control, reg[0], size);
	(void)memcpy(&sc->sc_pstate_status, reg[1], size);

	if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
		goto out;

	/*
	 * At the very least, mandate that
	 * XPSS supplies the control address.
	 */
	if (sc->sc_pstate_control.reg_addr == 0) {
		rv = AE_AML_BAD_RESOURCE_LENGTH;
		goto out;
	}

	/*
	 * If XPSS is present, copy the MSR addresses
	 * to the P-state structures for convenience.
	 */
	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		ps->ps_status_addr = sc->sc_pstate_status.reg_addr;
		ps->ps_control_addr = sc->sc_pstate_control.reg_addr;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

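/*
 * Parse the optional _PSD, which describes how P-states are
 * coordinated between processors. The object is a package that
 * contains a single package of five integers:
 *
 *	{ NumEntries, Revision, Domain, CoordType, NumProcessors }
 */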
static ACPI_STATUS
acpicpu_pstate_dep(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t val;
	uint8_t i, n;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSD", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 1) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = &obj->Package.Elements[0];

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm->Package.Count;

	if (n != 5) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = elm->Package.Elements;

	for (i = 0; i < n; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER) {
			rv = AE_TYPE;
			goto out;
		}

		if (elm[i].Integer.Value > UINT32_MAX) {
			rv = AE_AML_NUMERIC_OVERFLOW;
			goto out;
		}
	}

	val = elm[1].Integer.Value;

	if (val != 0)
		aprint_debug_dev(sc->sc_dev, "invalid revision in _PSD\n");

	val = elm[3].Integer.Value;

	if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) {
		rv = AE_AML_BAD_RESOURCE_VALUE;
		goto out;
	}

	val = elm[4].Integer.Value;

	if (val > sc->sc_ncpus) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	sc->sc_pstate_dep.dep_domain = elm[2].Integer.Value;
	sc->sc_pstate_dep.dep_type = elm[3].Integer.Value;
	sc->sc_pstate_dep.dep_ncpus = elm[4].Integer.Value;

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_debug_dev(sc->sc_dev, "failed to evaluate "
		    "_PSD: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static int
acpicpu_pstate_max(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * Evaluate the currently highest P-state that can be used.
	 * If available, we can use either this state or any lower
	 * power (i.e. higher numbered) state from the _PSS object.
	 * Note that the return value must match the _OST parameter.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq != 0) {
			sc->sc_pstate_max = val;
			return 0;
		}
	}

	return 1;
}

static int
acpicpu_pstate_min(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * The _PDL object defines the minimum when passive cooling
	 * is being performed. If available, we can use the returned
	 * state or any higher power (i.e. lower numbered) state.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PDL", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq == 0)
			return 1;

		if (val >= sc->sc_pstate_max) {
			sc->sc_pstate_min = val;
			return 0;
		}
	}

	return 1;
}

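/*
 * Called when the performance limits may have changed. The dynamic
 * limits are re-evaluated, and the result of the _PPC evaluation is
 * reported back to the platform via the optional _OST method.
 */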
static void
acpicpu_pstate_change(struct acpicpu_softc *sc)
{
	static ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[2];
	static int val = 0;

	acpicpu_pstate_reset(sc);

	/*
	 * Cache the checks as the optional
	 * _PDL and _OST are rarely present.
	 */
	if (val == 0)
		val = acpicpu_pstate_min(sc);

	arg.Count = 2;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_INTEGER;
	obj[1].Type = ACPI_TYPE_INTEGER;

	obj[0].Integer.Value = ACPICPU_P_NOTIFY;
	obj[1].Integer.Value = acpicpu_pstate_max(sc);

	if (ACPI_FAILURE(rv))
		return;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
}

static void
acpicpu_pstate_reset(struct acpicpu_softc *sc)
{

	sc->sc_pstate_max = 0;
	sc->sc_pstate_min = sc->sc_pstate_count - 1;

}

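/*
 * Request the transfer of P-state control from the firmware to the
 * operating system by writing the PSTATE_CNT value from the FADT to
 * the SMI command port. This is a no-op if either field is zero.
 */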
static void
acpicpu_pstate_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.PstateControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

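/*
 * Get the current frequency. The cached value is used when it is
 * available; otherwise the frequency is queried either from the
 * machine-dependent backend or by matching the value of the I/O
 * mapped status register against the known P-states.
 */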
int
acpicpu_pstate_get(struct cpu_info *ci, uint32_t *freq)
{
	struct acpicpu_pstate *ps = NULL;
	struct acpicpu_softc *sc;
	uint32_t i, val = 0;
	uint64_t addr;
	uint8_t width;
	int rv;

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL)) {
		rv = ENXIO;
		goto fail;
	}

	if (__predict_false(sc->sc_cold != false)) {
		rv = EBUSY;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	/*
	 * Use the cached value, if available.
	 */
	if (sc->sc_pstate_current != ACPICPU_P_STATE_UNKNOWN) {
		*freq = sc->sc_pstate_current;
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	mutex_exit(&sc->sc_mtx);

	switch (sc->sc_pstate_status.reg_spaceid) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_get(sc, freq);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		(void)AcpiOsReadPort(addr, &val, width);

		if (val == 0) {
			rv = EIO;
			goto fail;
		}

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			if (val == sc->sc_pstate[i].ps_status) {
				ps = &sc->sc_pstate[i];
				break;
			}
		}

		if (ps == NULL) {
			rv = EIO;
			goto fail;
		}

		*freq = ps->ps_freq;
		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = *freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed "
	    "to get frequency (err %d)\n", rv);

	mutex_enter(&sc->sc_mtx);
	*freq = sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}

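/*
 * Set the frequency. Note that the request is broadcast to all
 * CPUs via a cross-call, regardless of the given CPU.
 */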
void
acpicpu_pstate_set(struct cpu_info *ci, uint32_t freq)
{
	uint64_t xc;

	xc = xc_broadcast(0, acpicpu_pstate_set_xcall, &freq, NULL);
	xc_wait(xc);
}

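/*
 * Cross-call handler that runs on the target CPU: verify that the
 * requested frequency is within the current limits and program the
 * new P-state, either via the machine-dependent backend or via the
 * I/O mapped control register, verifying the transition from the
 * status register.
 */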
static void
acpicpu_pstate_set_xcall(void *arg1, void *arg2)
{
	struct acpicpu_pstate *ps = NULL;
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	uint32_t freq, i, val;
	uint64_t addr;
	uint8_t width;
	int rv;

	freq = *(uint32_t *)arg1;
	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL)) {
		rv = ENXIO;
		goto fail;
	}

	if (__predict_false(sc->sc_cold != false)) {
		rv = EBUSY;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_pstate_current == freq) {
		mutex_exit(&sc->sc_mtx);
		return;
	}

	/*
	 * Verify that the requested frequency is available.
	 *
	 * The access needs to be protected since the currently
	 * available maximum and minimum may change dynamically.
	 */
	for (i = sc->sc_pstate_max; i <= sc->sc_pstate_min; i++) {

		if (__predict_false(sc->sc_pstate[i].ps_freq == 0))
			continue;

		if (sc->sc_pstate[i].ps_freq == freq) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	mutex_exit(&sc->sc_mtx);

	if (__predict_false(ps == NULL)) {
		rv = EINVAL;
		goto fail;
	}

	switch (sc->sc_pstate_control.reg_spaceid) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_set(ps);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_control.reg_addr;
		width = sc->sc_pstate_control.reg_bitwidth;

		(void)AcpiOsWritePort(addr, ps->ps_control, width);

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		/*
		 * Some systems take longer to respond
		 * than the reported worst-case latency.
		 */
		for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

			(void)AcpiOsReadPort(addr, &val, width);

			if (val == ps->ps_status)
				break;

			DELAY(ps->ps_latency);
		}

		if (i == ACPICPU_P_STATE_RETRY) {
			rv = EAGAIN;
			goto fail;
		}

		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	ps->ps_evcnt.ev_count++;
	sc->sc_pstate_current = freq;
	mutex_exit(&sc->sc_mtx);

	return;

fail:
	aprint_error_dev(sc->sc_dev, "failed to set "
	    "frequency to %u (err %d)\n", freq, rv);

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);
}