/* $NetBSD: acpi_cpu_pstate.c,v 1.46 2011/03/17 15:59:36 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.46 2011/03/17 15:59:36 jruoho Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/once.h>
#include <sys/xcall.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_pstate")

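/*
 * ACPI CPU P-states (processor performance states). The available
 * states are enumerated from _PSS (or the extended XPSS), controlled
 * through the registers described by _PCT, and limited dynamically
 * by the optional _PPC and _PDL methods.
 */
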
static ACPI_STATUS	 acpicpu_pstate_pss(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_pstate_pss_add(struct acpicpu_pstate *,
						 ACPI_OBJECT *);
static ACPI_STATUS	 acpicpu_pstate_xpss(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_pstate_xpss_add(struct acpicpu_pstate *,
						 ACPI_OBJECT *);
static ACPI_STATUS	 acpicpu_pstate_pct(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_pstate_dep(struct acpicpu_softc *);
static int		 acpicpu_pstate_max(struct acpicpu_softc *);
static int		 acpicpu_pstate_min(struct acpicpu_softc *);
static void		 acpicpu_pstate_change(struct acpicpu_softc *);
static void		 acpicpu_pstate_reset(struct acpicpu_softc *);
static void		 acpicpu_pstate_bios(void);
static void		 acpicpu_pstate_set_xcall(void *, void *);

extern struct acpicpu_softc **acpicpu_sc;
void
acpicpu_pstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	const char *str;
	ACPI_HANDLE tmp;
	ACPI_STATUS rv;

	rv = acpicpu_pstate_pss(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PSS";
		goto fail;
	}

	/*
	 * Append additional information from the extended _PSS,
	 * if available. Note that XPSS cannot be used on Intel
	 * systems that use either _PDC or _OSC. From the XPSS
	 * method specification:
	 *
	 *	"The platform must not require the use of the
	 *	 optional _PDC or _OSC methods to coordinate
	 *	 between the operating system and firmware for
	 *	 the purposes of enabling specific processor
	 *	 power management features or implementations."
	 */
	if (sc->sc_cap == 0) {

		rv = acpicpu_pstate_xpss(sc);

		if (ACPI_SUCCESS(rv))
			sc->sc_flags |= ACPICPU_FLAG_P_XPSS;
	}

	rv = acpicpu_pstate_pct(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PCT";
		goto fail;
	}

	/*
	 * The ACPI 3.0 and 4.0 specifications mandate three
	 * objects for P-states: _PSS, _PCT, and _PPC. The earlier
	 * 2.0 standard, however, uses less strict wording, and
	 * some systems conforming to ACPI 2.0 do not have _PPC,
	 * the method for the dynamic maximum.
	 */
	rv = AcpiGetHandle(sc->sc_node->ad_handle, "_PPC", &tmp);

	if (ACPI_FAILURE(rv))
		aprint_debug_dev(self, "_PPC missing\n");

	/*
	 * Carry out MD initialization.
	 */
	rv = acpicpu_md_pstate_init(sc);

	if (rv != 0) {
		rv = AE_SUPPORT;
		goto fail;
	}

	/*
	 * Query the optional _PSD.
	 */
	rv = acpicpu_pstate_dep(sc);

	if (ACPI_SUCCESS(rv))
		sc->sc_flags |= ACPICPU_FLAG_P_DEP;

	sc->sc_flags |= ACPICPU_FLAG_P;

	acpicpu_pstate_bios();
	acpicpu_pstate_reset(sc);

	return;

fail:
	switch (rv) {

	case AE_NOT_FOUND:
		return;

	case AE_SUPPORT:
		aprint_verbose_dev(self, "P-states not supported\n");
		return;

	default:
		aprint_error_dev(self, "failed to evaluate "
		    "%s: %s\n", str, AcpiFormatException(rv));
	}
}

int
acpicpu_pstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	size_t size;
	int rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
		return 0;

	rv = RUN_ONCE(&once_detach, acpicpu_md_pstate_stop);

	if (rv != 0)
		return rv;

	size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);

	if (sc->sc_pstate != NULL)
		kmem_free(sc->sc_pstate, size);

	sc->sc_flags &= ~ACPICPU_FLAG_P;

	return 0;
}

void
acpicpu_pstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps;
	uint32_t i;
	int rv;

	rv = acpicpu_md_pstate_start(sc);

	if (rv != 0)
		goto fail;

	/*
	 * Initialize the states to P0.
	 */
	for (i = 0, rv = ENXIO; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0) {
			acpicpu_pstate_set(sc->sc_ci, ps->ps_freq);
			return;
		}
	}

fail:
	sc->sc_flags &= ~ACPICPU_FLAG_P;
	aprint_error_dev(self, "failed to start P-states (err %d)\n", rv);
}

bool
acpicpu_pstate_suspend(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps = NULL;
	struct cpu_info *ci = sc->sc_ci;
	uint64_t xc;
	int32_t i;

	/*
	 * Reset any dynamic limits.
	 */
	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_reset(sc);

	/*
	 * Following design notes for Windows, we set the highest
	 * P-state when entering any of the system sleep states.
	 * When resuming, the saved P-state will be restored.
	 *
	 *	Microsoft Corporation: Windows Native Processor
	 *	Performance Control. Version 1.1a, November, 2002.
	 */
	sc->sc_pstate_saved = sc->sc_pstate_current;

	for (i = sc->sc_pstate_count - 1; i >= 0; i--) {

		if (sc->sc_pstate[i].ps_freq != 0) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	mutex_exit(&sc->sc_mtx);

	if (__predict_false(ps == NULL))
		return true;

	if (sc->sc_pstate_saved == ps->ps_freq)
		return true;

	xc = xc_unicast(0, acpicpu_pstate_set_xcall, &ps->ps_freq, NULL, ci);
	xc_wait(xc);

	return true;
}

bool
acpicpu_pstate_resume(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	uint32_t freq = sc->sc_pstate_saved;
	uint64_t xc;

	xc = xc_unicast(0, acpicpu_pstate_set_xcall, &freq, NULL, sc->sc_ci);
	xc_wait(xc);

	return true;
}

void
acpicpu_pstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;
	uint32_t old, new;

	sc = device_private(self);

	mutex_enter(&sc->sc_mtx);

	old = sc->sc_pstate_max;
	acpicpu_pstate_change(sc);
	new = sc->sc_pstate_max;

	if (old == new) {
		mutex_exit(&sc->sc_mtx);
		return;
	}

	mutex_exit(&sc->sc_mtx);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "maximum frequency "
	    "changed from P%u (%u MHz) to P%u (%u MHz)\n",
	    old, sc->sc_pstate[old].ps_freq, new,
	    sc->sc_pstate[sc->sc_pstate_max].ps_freq));

	acpicpu_pstate_set(sc->sc_ci, sc->sc_pstate[new].ps_freq);
}

ACPI_STATUS
acpicpu_pstate_pss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	sc->sc_pstate_count = obj->Package.Count;

	if (sc->sc_pstate_count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	if (sc->sc_pstate_count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
	    sizeof(struct acpicpu_pstate), KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			aprint_error_dev(sc->sc_dev, "failed to add "
			    "P-state: %s\n", AcpiFormatException(rv));
			ps->ps_freq = 0;
			continue;
		}

		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

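/*
 * Add a P-state from a single _PSS entry. Per the ACPI specification,
 * each entry is a package of six integers: the core frequency (MHz),
 * the power dissipation (mW), the transition and bus master latencies
 * (us), and the control and status values used with _PCT.
 */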
static ACPI_STATUS
acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 6)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 6; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	ps->ps_freq = elm[0].Integer.Value;
	ps->ps_power = elm[1].Integer.Value;
	ps->ps_latency = elm[2].Integer.Value;
	ps->ps_latency_bm = elm[3].Integer.Value;
	ps->ps_control = elm[4].Integer.Value;
	ps->ps_status = elm[5].Integer.Value;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	if (ps->ps_latency == 0 || ps->ps_latency > 1000)
		ps->ps_latency = 1;

	return AE_OK;
}

static ACPI_STATUS
acpicpu_pstate_xpss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i = 0;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "XPSS", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != sc->sc_pstate_count) {
		rv = AE_LIMIT;
		goto out;
	}

	while (i < sc->sc_pstate_count) {

		ps = &sc->sc_pstate[i];
		acpicpu_pstate_xpss_add(ps, &obj->Package.Elements[i]);

		i++;
	}

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "XPSS: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

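/*
 * Add the values from a single XPSS entry. Each entry is expected to
 * be a package of eight elements: four integers that mirror the _PSS
 * fields (frequency, power, and the two latencies), followed by four
 * 8-byte buffers holding the 64-bit control and status values and
 * their respective masks.
 */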
static ACPI_STATUS
acpicpu_pstate_xpss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 8)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 4; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	for (; i < 8; i++) {

		if (elm[i].Type != ACPI_TYPE_BUFFER)
			return AE_TYPE;

		if (elm[i].Buffer.Length != 8)
			return AE_LIMIT;
	}

	/*
	 * Only overwrite the elements that were
	 * not available from the conventional _PSS.
	 */
	if (ps->ps_freq == 0)
		ps->ps_freq = elm[0].Integer.Value;

	if (ps->ps_power == 0)
		ps->ps_power = elm[1].Integer.Value;

	if (ps->ps_latency == 0)
		ps->ps_latency = elm[2].Integer.Value;

	if (ps->ps_latency_bm == 0)
		ps->ps_latency_bm = elm[3].Integer.Value;

	if (ps->ps_control == 0)
		ps->ps_control = ACPI_GET64(elm[4].Buffer.Pointer);

	if (ps->ps_status == 0)
		ps->ps_status = ACPI_GET64(elm[5].Buffer.Pointer);

	if (ps->ps_control_mask == 0)
		ps->ps_control_mask = ACPI_GET64(elm[6].Buffer.Pointer);

	if (ps->ps_status_mask == 0)
		ps->ps_status_mask = ACPI_GET64(elm[7].Buffer.Pointer);

	ps->ps_flags |= ACPICPU_FLAG_P_XPSS;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	if (ps->ps_latency == 0 || ps->ps_latency > 1000)
		ps->ps_latency = 1;

	return AE_OK;
}

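/*
 * Parse _PCT, the performance control package. It contains two
 * resource buffers, the performance control and the performance
 * status register, each given as a Generic Register descriptor in
 * either system I/O or fixed hardware (e.g. MSR) address space.
 */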
ACPI_STATUS
acpicpu_pstate_pct(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_reg);
	struct acpicpu_reg *reg[2];
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint8_t width;
	uint32_t i;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 2) {
		rv = AE_LIMIT;
		goto out;
	}

	for (i = 0; i < 2; i++) {

		elm = &obj->Package.Elements[i];

		if (elm->Type != ACPI_TYPE_BUFFER) {
			rv = AE_TYPE;
			goto out;
		}

		if (size > elm->Buffer.Length) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

		switch (reg[i]->reg_spaceid) {

		case ACPI_ADR_SPACE_SYSTEM_IO:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

			width = reg[i]->reg_bitwidth;

			if (width + reg[i]->reg_bitoffset > 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			if (width != 8 && width != 16 && width != 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			break;

		case ACPI_ADR_SPACE_FIXED_HARDWARE:

			if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

				if (reg[i]->reg_bitwidth != 64) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				if (reg[i]->reg_bitoffset != 0) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				break;
			}

			if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}

			break;

		default:
			rv = AE_AML_INVALID_SPACE_ID;
			goto out;
		}
	}

	if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	(void)memcpy(&sc->sc_pstate_control, reg[0], size);
	(void)memcpy(&sc->sc_pstate_status, reg[1], size);

	if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
		goto out;

	/*
	 * At the very least, mandate that
	 * XPSS supplies the control address.
	 */
	if (sc->sc_pstate_control.reg_addr == 0) {
		rv = AE_AML_BAD_RESOURCE_LENGTH;
		goto out;
	}

	/*
	 * If XPSS is present, copy the MSR addresses
	 * to the P-state structures for convenience.
	 */
	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		ps->ps_status_addr = sc->sc_pstate_status.reg_addr;
		ps->ps_control_addr = sc->sc_pstate_control.reg_addr;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

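/*
 * Query the optional _PSD, the P-state dependency package. It wraps
 * a single package of five integers: the number of entries, the
 * revision, the dependency domain number, the coordination type
 * (0xFC software-all, 0xFD software-any, or 0xFE hardware-all), and
 * the number of processors that belong to the domain.
 */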
static ACPI_STATUS
acpicpu_pstate_dep(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t val;
	uint8_t i, n;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSD", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 1) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = &obj->Package.Elements[0];

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm->Package.Count;

	if (n != 5) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = elm->Package.Elements;

	for (i = 0; i < n; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER) {
			rv = AE_TYPE;
			goto out;
		}

		if (elm[i].Integer.Value > UINT32_MAX) {
			rv = AE_AML_NUMERIC_OVERFLOW;
			goto out;
		}
	}

	val = elm[1].Integer.Value;

	if (val != 0)
		aprint_debug_dev(sc->sc_dev, "invalid revision in _PSD\n");

	val = elm[3].Integer.Value;

	if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) {
		rv = AE_AML_BAD_RESOURCE_VALUE;
		goto out;
	}

	val = elm[4].Integer.Value;

	if (val > sc->sc_ncpus) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	sc->sc_pstate_dep.dep_domain = elm[2].Integer.Value;
	sc->sc_pstate_dep.dep_type = elm[3].Integer.Value;
	sc->sc_pstate_dep.dep_ncpus = elm[4].Integer.Value;

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_debug_dev(sc->sc_dev, "failed to evaluate "
		    "_PSD: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static int
acpicpu_pstate_max(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * Evaluate the currently highest P-state that can be used.
	 * If available, we can use either this state or any lower
	 * power (i.e. higher numbered) state from the _PSS object.
	 * Note that the return value must match the _OST parameter.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq != 0) {
			sc->sc_pstate_max = val;
			return 0;
		}
	}

	return 1;
}

static int
acpicpu_pstate_min(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * The _PDL object defines the minimum when passive cooling
	 * is being performed. If available, we can use the returned
	 * state or any higher power (i.e. lower numbered) state.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PDL", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq == 0)
			return 1;

		if (val >= sc->sc_pstate_max) {
			sc->sc_pstate_min = val;
			return 0;
		}
	}

	return 1;
}

static void
acpicpu_pstate_change(struct acpicpu_softc *sc)
{
	static ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[2];
	static int val = 0;

	acpicpu_pstate_reset(sc);

	/*
	 * Cache the checks as the optional
	 * _PDL and _OST are rarely present.
	 */
	if (val == 0)
		val = acpicpu_pstate_min(sc);

	arg.Count = 2;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_INTEGER;
	obj[1].Type = ACPI_TYPE_INTEGER;

	obj[0].Integer.Value = ACPICPU_P_NOTIFY;
	obj[1].Integer.Value = acpicpu_pstate_max(sc);

	if (ACPI_FAILURE(rv))
		return;

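	/*
	 * Report the result of the evaluation back
	 * to the platform via the optional _OST method.
	 */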
	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
}

static void
acpicpu_pstate_reset(struct acpicpu_softc *sc)
{

	sc->sc_pstate_max = 0;
	sc->sc_pstate_min = sc->sc_pstate_count - 1;

}

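/*
 * Claim control of the P-states from the firmware. Per the FADT, a
 * non-zero PSTATE_CNT value written to the SMI command port indicates
 * that the OS assumes responsibility for performance state control.
 */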
static void
acpicpu_pstate_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.PstateControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

int
acpicpu_pstate_get(struct cpu_info *ci, uint32_t *freq)
{
	struct acpicpu_pstate *ps = NULL;
	struct acpicpu_softc *sc;
	uint32_t i, val = 0;
	uint64_t addr;
	uint8_t width;
	int rv;

	sc = acpicpu_sc[ci->ci_acpiid];

	/*
	 * The error path below requires a valid softc.
	 */
	if (__predict_false(sc == NULL))
		return ENXIO;

	if (__predict_false(sc->sc_cold != false)) {
		rv = EBUSY;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	/*
	 * Use the cached value, if available.
	 */
	if (sc->sc_pstate_current != ACPICPU_P_STATE_UNKNOWN) {
		*freq = sc->sc_pstate_current;
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	mutex_exit(&sc->sc_mtx);

	switch (sc->sc_pstate_status.reg_spaceid) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_get(sc, freq);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		(void)AcpiOsReadPort(addr, &val, width);

		if (val == 0) {
			rv = EIO;
			goto fail;
		}

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			if (val == sc->sc_pstate[i].ps_status) {
				ps = &sc->sc_pstate[i];
				break;
			}
		}

		if (ps == NULL) {
			rv = EIO;
			goto fail;
		}

		*freq = ps->ps_freq;
		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = *freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed "
	    "to get frequency (err %d)\n", rv);

	mutex_enter(&sc->sc_mtx);
	*freq = sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}

void
acpicpu_pstate_set(struct cpu_info *ci, uint32_t freq)
{
	uint64_t xc;

	xc = xc_broadcast(0, acpicpu_pstate_set_xcall, &freq, NULL);
	xc_wait(xc);
}

static void
acpicpu_pstate_set_xcall(void *arg1, void *arg2)
{
	struct acpicpu_pstate *ps = NULL;
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	uint32_t freq, i, val;
	uint64_t addr;
	uint8_t width;
	int rv;

	freq = *(uint32_t *)arg1;
	sc = acpicpu_sc[ci->ci_acpiid];

	/*
	 * The error path below requires a valid softc.
	 */
	if (__predict_false(sc == NULL))
		return;

	if (__predict_false(sc->sc_cold != false)) {
		rv = EBUSY;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_pstate_current == freq) {
		mutex_exit(&sc->sc_mtx);
		return;
	}

	/*
	 * Verify that the requested frequency is available.
	 *
	 * The access needs to be protected since the currently
	 * available maximum and minimum may change dynamically.
	 */
	for (i = sc->sc_pstate_max; i <= sc->sc_pstate_min; i++) {

		if (__predict_false(sc->sc_pstate[i].ps_freq == 0))
			continue;

		if (sc->sc_pstate[i].ps_freq == freq) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	mutex_exit(&sc->sc_mtx);

	if (__predict_false(ps == NULL)) {
		rv = EINVAL;
		goto fail;
	}

	switch (sc->sc_pstate_control.reg_spaceid) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_set(ps);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_control.reg_addr;
		width = sc->sc_pstate_control.reg_bitwidth;

		(void)AcpiOsWritePort(addr, ps->ps_control, width);

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		/*
		 * Some systems take longer to respond
		 * than the reported worst-case latency.
		 */
		for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

			(void)AcpiOsReadPort(addr, &val, width);

			if (val == ps->ps_status)
				break;

			DELAY(ps->ps_latency);
		}

		if (i == ACPICPU_P_STATE_RETRY) {
			rv = EAGAIN;
			goto fail;
		}

		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	ps->ps_evcnt.ev_count++;
	sc->sc_pstate_current = freq;
	mutex_exit(&sc->sc_mtx);

	return;

fail:
	aprint_error_dev(sc->sc_dev, "failed to set "
	    "frequency to %u (err %d)\n", freq, rv);

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);
}