acpi_cpu.c revision 1.40.2.1 1 /* $NetBSD: acpi_cpu.c,v 1.40.2.1 2011/06/23 14:19:55 cherry Exp $ */
2
3 /*-
4 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen (at) iki.fi>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.40.2.1 2011/06/23 14:19:55 cherry Exp $");
31
32 #include <sys/param.h>
33 #include <sys/cpu.h>
34 #include <sys/evcnt.h>
35 #include <sys/kernel.h>
36 #include <sys/kmem.h>
37 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/sysctl.h>
40
41 #include <dev/acpi/acpireg.h>
42 #include <dev/acpi/acpivar.h>
43 #include <dev/acpi/acpi_cpu.h>
44
45 #include <machine/acpi_machdep.h>
46 #include <machine/cpuvar.h>
47
48 #define _COMPONENT ACPI_BUS_COMPONENT
49 ACPI_MODULE_NAME ("acpi_cpu")
50
/* Autoconfiguration hooks. */
static int		 acpicpu_match(device_t, cfdata_t, void *);
static void		 acpicpu_attach(device_t, device_t, void *);
static int		 acpicpu_detach(device_t, int);
static int		 acpicpu_once_attach(void);
static int		 acpicpu_once_detach(void);
static void		 acpicpu_start(device_t);
static void		 acpicpu_sysctl(device_t);

/* ACPI namespace evaluation and capability negotiation. */
static ACPI_STATUS	 acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static uint32_t		 acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_cap_osc(struct acpicpu_softc *,
					 uint32_t, uint32_t *);
static void		 acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		 acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		 acpicpu_resume(device_t, const pmf_qual_t *);
static void		 acpicpu_evcnt_attach(device_t);
static void		 acpicpu_evcnt_detach(device_t);
static void		 acpicpu_debug_print(device_t);
static const char	*acpicpu_debug_print_method(uint8_t);
static const char	*acpicpu_debug_print_dep(uint32_t);

/* Number of successfully attached acpicpu(4) devices. */
static uint32_t acpicpu_count = 0;

/*
 * Global softc array, indexed by ci_acpiid.  Allocated once in
 * acpicpu_once_attach() and shared with the state-specific code.
 */
struct acpicpu_softc **acpicpu_sc = NULL;

static struct sysctllog *acpicpu_log = NULL;

/* Run-time tunables, exported below hw.acpi.cpu via sysctl(8). */
static bool acpicpu_dynamic = true;	/* handle dynamic state changes */
static bool acpicpu_passive = true;	/* allow passive cooling */

/*
 * DMI-based blacklist of platforms on which the
 * driver is known to cause problems; see acpicpu_match().
 */
static const struct {
	const char	*manu;
	const char	*prod;
	const char	*vers;
} acpicpu_quirks[] = {
	{ "Supermicro", "PDSMi-LN4", "0123456789" },
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);
88
89 static int
90 acpicpu_match(device_t parent, cfdata_t match, void *aux)
91 {
92 const char *manu, *prod, *vers;
93 struct cpu_info *ci;
94 size_t i;
95
96 if (acpi_softc == NULL)
97 return 0;
98
99 manu = pmf_get_platform("system-manufacturer");
100 prod = pmf_get_platform("system-product-name");
101 vers = pmf_get_platform("system-version");
102
103 if (manu != NULL && prod != NULL && vers != NULL) {
104
105 for (i = 0; i < __arraycount(acpicpu_quirks); i++) {
106
107 if (strcasecmp(acpicpu_quirks[i].manu, manu) == 0 &&
108 strcasecmp(acpicpu_quirks[i].prod, prod) == 0 &&
109 strcasecmp(acpicpu_quirks[i].vers, vers) == 0)
110 return 0;
111 }
112 }
113
114 ci = acpicpu_md_match(parent, match, aux);
115
116 if (ci == NULL)
117 return 0;
118
119 if (acpi_match_cpu_info(ci) == NULL)
120 return 0;
121
122 return 10;
123 }
124
/*
 * Autoconfiguration attach.  Binds the softc to the matching ACPI
 * namespace node, performs the one-time global initialization, and
 * attaches the C-, P- and T-state sub-drivers.  The state-specific
 * start-up is deferred to acpicpu_start() via config_interrupts().
 */
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci;
	ACPI_HANDLE hdl;
	cpuid_t id;
	int rv;

	ci = acpicpu_md_attach(parent, self, aux);

	if (ci == NULL)
		return;

	sc->sc_ci = ci;
	sc->sc_dev = self;

	/* Stay "cold" until acpicpu_start() has run on all CPUs. */
	sc->sc_cold = true;

	hdl = acpi_match_cpu_info(ci);

	if (hdl == NULL) {
		aprint_normal(": failed to match processor\n");
		return;
	}

	sc->sc_node = acpi_match_node(hdl);

	/* Allocate the global softc array on the first attach. */
	if (acpicpu_once_attach() != 0) {
		aprint_normal(": failed to initialize\n");
		return;
	}

	KASSERT(acpi_softc != NULL);
	KASSERT(acpicpu_sc != NULL);
	KASSERT(sc->sc_node != NULL);

	id = sc->sc_ci->ci_acpiid;

	if (acpicpu_sc[id] != NULL) {
		aprint_normal(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	/* Note: rv holds an ACPI_STATUS here, not an errno value. */
	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);

	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(self, "failed to obtain CPU object\n");

	acpicpu_count++;
	acpicpu_sc[id] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags = acpicpu_md_flags();

	KASSERT(acpicpu_count <= sc->sc_ncpus);
	KASSERT(sc->sc_node->ad_device == NULL);

	sc->sc_node->ad_device = self;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	/* The sub-drivers set the ACPICPU_FLAG_{C,P,T} bits on success. */
	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	acpicpu_debug_print(self);
	acpicpu_evcnt_attach(self);

	(void)config_interrupts(self, acpicpu_start);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}
200
201 static int
202 acpicpu_detach(device_t self, int flags)
203 {
204 struct acpicpu_softc *sc = device_private(self);
205 int rv = 0;
206
207 sc->sc_cold = true;
208
209 acpicpu_evcnt_detach(self);
210 acpi_deregister_notify(sc->sc_node);
211
212 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
213 rv = acpicpu_cstate_detach(self);
214
215 if (rv != 0)
216 return rv;
217
218 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
219 rv = acpicpu_pstate_detach(self);
220
221 if (rv != 0)
222 return rv;
223
224 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
225 rv = acpicpu_tstate_detach(self);
226
227 if (rv != 0)
228 return rv;
229
230 mutex_destroy(&sc->sc_mtx);
231
232 sc->sc_node->ad_device = NULL;
233
234 acpicpu_count--;
235 acpicpu_once_detach();
236
237 return 0;
238 }
239
240 static int
241 acpicpu_once_attach(void)
242 {
243 struct acpicpu_softc *sc;
244 unsigned int i;
245
246 if (acpicpu_count != 0)
247 return 0;
248
249 KASSERT(acpicpu_sc == NULL);
250 KASSERT(acpicpu_log == NULL);
251
252 acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);
253
254 if (acpicpu_sc == NULL)
255 return ENOMEM;
256
257 for (i = 0; i < maxcpus; i++)
258 acpicpu_sc[i] = NULL;
259
260 return 0;
261 }
262
263 static int
264 acpicpu_once_detach(void)
265 {
266 struct acpicpu_softc *sc;
267
268 if (acpicpu_count != 0)
269 return EDEADLK;
270
271 if (acpicpu_log != NULL)
272 sysctl_teardown(&acpicpu_log);
273
274 if (acpicpu_sc != NULL)
275 kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));
276
277 return 0;
278 }
279
/*
 * Deferred start-up, scheduled via config_interrupts().  Runs the
 * state-specific initialization exactly once, on the last ACPI CPU
 * to reach this point, after interrupts have been enabled and all
 * CPUs are running.
 */
static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static uint32_t count = 0;	/* rendezvous counter, shared by all CPUs */

	/*
	 * Run the state-specific initialization routines. These
	 * must run only once, after interrupts have been enabled,
	 * all CPUs are running, and all ACPI CPUs have attached.
	 */
	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
		sc->sc_cold = false;
		return;
	}

	/*
	 * Set the last ACPI CPU as non-cold
	 * only after C-states are enabled.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	acpicpu_sysctl(self);
	aprint_debug_dev(self, "ACPI CPUs started\n");
}
314
/*
 * Create the sysctl tree for the run-time tunables, rooted at the
 * "cpu" node below hw.acpi.  Called once from acpicpu_start(); all
 * nodes are recorded in acpicpu_log for teardown on detach.
 */
static void
acpicpu_sysctl(device_t self)
{
	const struct sysctlnode *node;
	int err;

	KASSERT(acpicpu_log == NULL);

	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	/* hw.acpi.cpu.dynamic: react to dynamic state-change notifies. */
	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	/* hw.acpi.cpu.passive: allow passive cooling via T-states. */
	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
}
365
366 static ACPI_STATUS
367 acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
368 {
369 ACPI_OBJECT *obj;
370 ACPI_BUFFER buf;
371 ACPI_STATUS rv;
372
373 rv = acpi_eval_struct(hdl, NULL, &buf);
374
375 if (ACPI_FAILURE(rv))
376 goto out;
377
378 obj = buf.Pointer;
379
380 if (obj->Type != ACPI_TYPE_PROCESSOR) {
381 rv = AE_TYPE;
382 goto out;
383 }
384
385 if (obj->Processor.ProcId > (uint32_t)maxcpus) {
386 rv = AE_LIMIT;
387 goto out;
388 }
389
390 KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);
391
392 if (ao != NULL) {
393 ao->ao_procid = obj->Processor.ProcId;
394 ao->ao_pblklen = obj->Processor.PblkLength;
395 ao->ao_pblkaddr = obj->Processor.PblkAddress;
396 }
397
398 out:
399 if (buf.Pointer != NULL)
400 ACPI_FREE(buf.Pointer);
401
402 return rv;
403 }
404
405 static uint32_t
406 acpicpu_cap(struct acpicpu_softc *sc)
407 {
408 uint32_t flags, cap = 0;
409 ACPI_STATUS rv;
410
411 /*
412 * Query and set machine-dependent capabilities.
413 * Note that the Intel-specific _PDC method has
414 * already been evaluated. It was furthermore
415 * deprecated in the ACPI 3.0 in favor of _OSC.
416 */
417 flags = acpi_md_pdc();
418 rv = acpicpu_cap_osc(sc, flags, &cap);
419
420 if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
421
422 aprint_error_dev(sc->sc_dev, "failed to evaluate "
423 "_OSC: %s\n", AcpiFormatException(rv));
424 }
425
426 return (cap != 0) ? cap : flags;
427 }
428
/*
 * Negotiate processor capabilities with the firmware by evaluating
 * _OSC with the Intel processor-aggregator UUID.  The negotiation
 * is done in two passes: first with the query bit set (possibly
 * retried a few times while the firmware masks capabilities), then
 * with the query bit clear to commit the result.  On success the
 * granted capabilities are stored in *val.
 */
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;	/* remaining re-query attempts (up to 6 in total) */

	/* UUID identifying the Intel processor _OSC interface. */
	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	/* First pass: query only, do not commit anything yet. */
	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	/* _OSC takes four arguments: UUID, revision, count, and buffer. */
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	/* ptr[0] holds the returned error bits, ptr[1] the capabilities. */
	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 *  _OSC with the Query Support Flag set until _OSC
	 *  returns the Capabilities Masked bit clear, to
	 *  negotiate the set of features to be granted to
	 *  the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	/* Second pass: commit by re-evaluating without the query bit. */
	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
538
539 static void
540 acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
541 {
542 ACPI_OSD_EXEC_CALLBACK func;
543 struct acpicpu_softc *sc;
544 device_t self = aux;
545
546 sc = device_private(self);
547
548 if (sc->sc_cold != false)
549 return;
550
551 if (acpicpu_dynamic != true)
552 return;
553
554 switch (evt) {
555
556 case ACPICPU_C_NOTIFY:
557
558 if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
559 return;
560
561 func = acpicpu_cstate_callback;
562 break;
563
564 case ACPICPU_P_NOTIFY:
565
566 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
567 return;
568
569 func = acpicpu_pstate_callback;
570 break;
571
572 case ACPICPU_T_NOTIFY:
573
574 if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
575 return;
576
577 func = acpicpu_tstate_callback;
578 break;
579
580 default:
581 aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
582 return;
583 }
584
585 (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
586 }
587
588 static bool
589 acpicpu_suspend(device_t self, const pmf_qual_t *qual)
590 {
591 struct acpicpu_softc *sc = device_private(self);
592
593 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
594 (void)acpicpu_cstate_suspend(self);
595
596 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
597 (void)acpicpu_pstate_suspend(self);
598
599 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
600 (void)acpicpu_tstate_suspend(self);
601
602 sc->sc_cold = true;
603
604 return true;
605 }
606
607 static bool
608 acpicpu_resume(device_t self, const pmf_qual_t *qual)
609 {
610 struct acpicpu_softc *sc = device_private(self);
611 static const int handler = OSL_NOTIFY_HANDLER;
612
613 sc->sc_cold = false;
614
615 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
616 (void)AcpiOsExecute(handler, acpicpu_cstate_resume, self);
617
618 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
619 (void)AcpiOsExecute(handler, acpicpu_pstate_resume, self);
620
621 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
622 (void)AcpiOsExecute(handler, acpicpu_tstate_resume, self);
623
624 return true;
625 }
626
627 static void
628 acpicpu_evcnt_attach(device_t self)
629 {
630 struct acpicpu_softc *sc = device_private(self);
631 struct acpicpu_cstate *cs;
632 struct acpicpu_pstate *ps;
633 struct acpicpu_tstate *ts;
634 const char *str;
635 uint32_t i;
636
637 for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
638
639 cs = &sc->sc_cstate[i];
640
641 if (cs->cs_method == 0)
642 continue;
643
644 str = "HALT";
645
646 if (cs->cs_method == ACPICPU_C_STATE_FFH)
647 str = "MWAIT";
648
649 if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
650 str = "I/O";
651
652 (void)snprintf(cs->cs_name, sizeof(cs->cs_name),
653 "C%d (%s)", i, str);
654
655 evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
656 NULL, device_xname(sc->sc_dev), cs->cs_name);
657 }
658
659 for (i = 0; i < sc->sc_pstate_count; i++) {
660
661 ps = &sc->sc_pstate[i];
662
663 if (ps->ps_freq == 0)
664 continue;
665
666 (void)snprintf(ps->ps_name, sizeof(ps->ps_name),
667 "P%u (%u MHz)", i, ps->ps_freq);
668
669 evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
670 NULL, device_xname(sc->sc_dev), ps->ps_name);
671 }
672
673 for (i = 0; i < sc->sc_tstate_count; i++) {
674
675 ts = &sc->sc_tstate[i];
676
677 if (ts->ts_percent == 0)
678 continue;
679
680 (void)snprintf(ts->ts_name, sizeof(ts->ts_name),
681 "T%u (%u %%)", i, ts->ts_percent);
682
683 evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
684 NULL, device_xname(sc->sc_dev), ts->ts_name);
685 }
686 }
687
688 static void
689 acpicpu_evcnt_detach(device_t self)
690 {
691 struct acpicpu_softc *sc = device_private(self);
692 struct acpicpu_cstate *cs;
693 struct acpicpu_pstate *ps;
694 struct acpicpu_tstate *ts;
695 uint32_t i;
696
697 for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
698
699 cs = &sc->sc_cstate[i];
700
701 if (cs->cs_method != 0)
702 evcnt_detach(&cs->cs_evcnt);
703 }
704
705 for (i = 0; i < sc->sc_pstate_count; i++) {
706
707 ps = &sc->sc_pstate[i];
708
709 if (ps->ps_freq != 0)
710 evcnt_detach(&ps->ps_evcnt);
711 }
712
713 for (i = 0; i < sc->sc_tstate_count; i++) {
714
715 ts = &sc->sc_tstate[i];
716
717 if (ts->ts_percent != 0)
718 evcnt_detach(&ts->ts_evcnt);
719 }
720 }
721
722 static void
723 acpicpu_debug_print(device_t self)
724 {
725 struct acpicpu_softc *sc = device_private(self);
726 struct cpu_info *ci = sc->sc_ci;
727 struct acpicpu_cstate *cs;
728 struct acpicpu_pstate *ps;
729 struct acpicpu_tstate *ts;
730 static bool once = false;
731 struct acpicpu_dep *dep;
732 uint32_t i, method;
733
734 if (once != true) {
735
736 for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
737
738 cs = &sc->sc_cstate[i];
739
740 if (cs->cs_method == 0)
741 continue;
742
743 aprint_verbose_dev(sc->sc_dev, "C%d: %3s, "
744 "lat %3u us, pow %5u mW%s\n", i,
745 acpicpu_debug_print_method(cs->cs_method),
746 cs->cs_latency, cs->cs_power,
747 (cs->cs_flags != 0) ? ", bus master check" : "");
748 }
749
750 method = sc->sc_pstate_control.reg_spaceid;
751
752 for (i = 0; i < sc->sc_pstate_count; i++) {
753
754 ps = &sc->sc_pstate[i];
755
756 if (ps->ps_freq == 0)
757 continue;
758
759 aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
760 "lat %3u us, pow %5u mW, %4u MHz%s\n", i,
761 acpicpu_debug_print_method(method),
762 ps->ps_latency, ps->ps_power, ps->ps_freq,
763 (ps->ps_flags & ACPICPU_FLAG_P_TURBO) != 0 ?
764 ", turbo boost" : "");
765 }
766
767 method = sc->sc_tstate_control.reg_spaceid;
768
769 for (i = 0; i < sc->sc_tstate_count; i++) {
770
771 ts = &sc->sc_tstate[i];
772
773 if (ts->ts_percent == 0)
774 continue;
775
776 aprint_verbose_dev(sc->sc_dev, "T%u: %3s, "
777 "lat %3u us, pow %5u mW, %3u %%\n", i,
778 acpicpu_debug_print_method(method),
779 ts->ts_latency, ts->ts_power, ts->ts_percent);
780 }
781
782 once = true;
783 }
784
785 aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, "
786 "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid,
787 (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags);
788
789 if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {
790
791 dep = &sc->sc_cstate_dep;
792
793 aprint_debug_dev(sc->sc_dev, "C-state coordination: "
794 "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
795 dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
796 }
797
798 if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {
799
800 dep = &sc->sc_pstate_dep;
801
802 aprint_debug_dev(sc->sc_dev, "P-state coordination: "
803 "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
804 dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
805 }
806
807 if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {
808
809 dep = &sc->sc_tstate_dep;
810
811 aprint_debug_dev(sc->sc_dev, "T-state coordination: "
812 "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
813 dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
814 }
815 }
816
817 static const char *
818 acpicpu_debug_print_method(uint8_t val)
819 {
820
821 if (val == ACPICPU_C_STATE_FFH)
822 return "FFH";
823
824 if (val == ACPICPU_C_STATE_HALT)
825 return "HLT";
826
827 if (val == ACPICPU_C_STATE_SYSIO)
828 return "I/O";
829
830 if (val == ACPI_ADR_SPACE_SYSTEM_IO)
831 return "I/O";
832
833 if (val == ACPI_ADR_SPACE_FIXED_HARDWARE)
834 return "FFH";
835
836 return "???";
837 }
838
839 static const char *
840 acpicpu_debug_print_dep(uint32_t val)
841 {
842
843 switch (val) {
844
845 case ACPICPU_DEP_SW_ALL:
846 return "SW_ALL";
847
848 case ACPICPU_DEP_SW_ANY:
849 return "SW_ANY";
850
851 case ACPICPU_DEP_HW_ALL:
852 return "HW_ALL";
853
854 default:
855 return "unknown";
856 }
857 }
858
MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

/*
 * Module control: registers and unregisters the autoconfiguration
 * glue when built as a loadable kernel module; a no-op success for
 * the built-in case.
 */
static int
acpicpu_modcmd(modcmd_t cmd, void *aux)
{
	int rv = 0;

	switch (cmd) {

	case MODULE_CMD_INIT:

#ifdef _MODULE
		rv = config_init_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	case MODULE_CMD_FINI:

#ifdef _MODULE
		rv = config_fini_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	default:
		rv = ENOTTY;
	}

	return rv;
}
894