/* $NetBSD: acpi_cpu.c,v 1.33 2011/03/01 05:57:04 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen (at) iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.33 2011/03/01 05:57:04 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#include <machine/acpi_machdep.h>
#include <machine/cpuvar.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu")

static int		acpicpu_match(device_t, cfdata_t, void *);
static void		acpicpu_attach(device_t, device_t, void *);
static int		acpicpu_detach(device_t, int);
static int		acpicpu_once_attach(void);
static int		acpicpu_once_detach(void);
static void		acpicpu_start(device_t);
static void		acpicpu_sysctl(device_t);

static ACPI_STATUS	acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static int		acpicpu_find(struct cpu_info *,
			    struct acpi_devnode **);
static uint32_t		acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cap_pdc(struct acpicpu_softc *, uint32_t);
static ACPI_STATUS	acpicpu_cap_osc(struct acpicpu_softc *,
			    uint32_t, uint32_t *);
static void		acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		acpicpu_resume(device_t, const pmf_qual_t *);
static void		acpicpu_evcnt_attach(device_t);
static void		acpicpu_evcnt_detach(device_t);
static void		acpicpu_debug_print(device_t);
static const char	*acpicpu_debug_print_method(uint8_t);
static const char	*acpicpu_debug_print_dep(uint32_t);

static uint32_t		 acpicpu_count = 0;
struct acpicpu_softc	**acpicpu_sc = NULL;
static struct sysctllog	*acpicpu_log = NULL;
static bool		 acpicpu_dynamic = true;
static bool		 acpicpu_passive = true;

static const char * const acpicpu_hid[] = {
	"ACPI0007",
	NULL
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);

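/*
 * Autoconfiguration match. The machine-dependent layer resolves
 * the attach arguments to a struct cpu_info; the match succeeds
 * only if the CPU can also be located in the ACPI namespace.
 */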
static int
acpicpu_match(device_t parent, cfdata_t match, void *aux)
{
	struct cpu_info *ci;

	if (acpi_softc == NULL)
		return 0;

	ci = acpicpu_md_match(parent, match, aux);

	if (ci == NULL)
		return 0;

	return acpicpu_find(ci, NULL);
}

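/*
 * Attach an ACPI CPU: locate the matching ACPI device node,
 * perform the one-time initialization on the first attach,
 * record the per-CPU capabilities and flags, and attach the
 * C-, P-, and T-state support before registering the notify
 * handler and the power management hooks.
 */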
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci;
	cpuid_t id;
	int rv;

	ci = acpicpu_md_attach(parent, self, aux);

	if (ci == NULL)
		return;

	sc->sc_ci = ci;
	sc->sc_dev = self;
	sc->sc_cold = true;
	sc->sc_node = NULL;

	rv = acpicpu_find(ci, &sc->sc_node);

	if (rv == 0) {
		aprint_normal(": failed to match processor\n");
		return;
	}

	if (acpicpu_once_attach() != 0) {
		aprint_normal(": failed to initialize\n");
		return;
	}

	KASSERT(acpi_softc != NULL);
	KASSERT(acpicpu_sc != NULL);
	KASSERT(sc->sc_node != NULL);

	id = sc->sc_ci->ci_acpiid;

	if (acpicpu_sc[id] != NULL) {
		aprint_normal(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);

	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(self, "failed to obtain CPU object\n");

	acpicpu_count++;
	acpicpu_sc[id] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags = acpicpu_md_flags();

	KASSERT(acpicpu_count <= sc->sc_ncpus);
	KASSERT(sc->sc_node->ad_device == NULL);

	sc->sc_node->ad_device = self;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	acpicpu_debug_print(self);
	acpicpu_evcnt_attach(self);

	(void)config_interrupts(self, acpicpu_start);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}

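/*
 * Detach in the reverse order of attachment. Any failure from
 * the state-specific detach routines aborts the detach.
 */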
static int
acpicpu_detach(device_t self, int flags)
{
	struct acpicpu_softc *sc = device_private(self);
	int rv = 0;

	sc->sc_cold = true;
	acpi_deregister_notify(sc->sc_node);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		rv = acpicpu_cstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		rv = acpicpu_pstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		rv = acpicpu_tstate_detach(self);

	if (rv != 0)
		return rv;

	mutex_destroy(&sc->sc_mtx);
	acpicpu_evcnt_detach(self);

	sc->sc_node->ad_device = NULL;

	acpicpu_count--;
	acpicpu_once_detach();

	return 0;
}

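/*
 * One-time initialization shared by all instances: allocate the
 * global array that maps ACPI CPU ids to softc pointers.
 */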
static int
acpicpu_once_attach(void)
{
	struct acpicpu_softc *sc;
	unsigned int i;

	if (acpicpu_count != 0)
		return 0;

	KASSERT(acpicpu_sc == NULL);
	KASSERT(acpicpu_log == NULL);

	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);

	if (acpicpu_sc == NULL)
		return ENOMEM;

	for (i = 0; i < maxcpus; i++)
		acpicpu_sc[i] = NULL;

	return 0;
}

static int
acpicpu_once_detach(void)
{
	struct acpicpu_softc *sc;

	if (acpicpu_count != 0)
		return EDEADLK;

	if (acpicpu_log != NULL)
		sysctl_teardown(&acpicpu_log);

	if (acpicpu_sc != NULL)
		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));

	return 0;
}

static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static uint32_t count = 0;

	/*
	 * Run the state-specific initialization routines. These
	 * must run only once, after interrupts have been enabled,
	 * all CPUs are running, and all ACPI CPUs have attached.
	 */
	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
		sc->sc_cold = false;
		return;
	}

	/*
	 * Set the last ACPI CPU as non-cold
	 * only after C-states are enabled.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	acpicpu_sysctl(self);
	aprint_debug_dev(self, "ACPI CPUs started\n");
}

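/*
 * Create the hw.acpi.cpu sysctl tree with the "dynamic" and
 * "passive" knobs. Called once, from the last CPU to start.
 */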
static void
acpicpu_sysctl(device_t self)
{
	const struct sysctlnode *node;
	int err;

	KASSERT(acpicpu_log == NULL);

	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
}

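/*
 * Evaluate a Processor() object and optionally copy its processor
 * id and P_BLK address and length to the given acpicpu_object.
 */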
static ACPI_STATUS
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

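/*
 * Find the ACPI device node that corresponds to the given
 * struct cpu_info. Returns non-zero on success.
 */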
static int
acpicpu_find(struct cpu_info *ci, struct acpi_devnode **ptr)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpicpu_object ao;
	struct acpi_devnode *ad;
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	if (sc == NULL || acpi_active == 0)
		return 0;

	/*
	 * CPUs are declared in the ACPI namespace
	 * either as a Processor() or as a Device().
	 * In both cases the MADT entries are used
	 * for the match (see ACPI 4.0, section 8.4).
	 */
	SIMPLEQ_FOREACH(ad, &sc->ad_head, ad_list) {

		if (ad->ad_type == ACPI_TYPE_PROCESSOR) {

			rv = acpicpu_object(ad->ad_handle, &ao);

			if (ACPI_SUCCESS(rv) && ci->ci_acpiid == ao.ao_procid)
				goto out;
		}

		if (acpi_match_hid(ad->ad_devinfo, acpicpu_hid) != 0) {

			rv = acpi_eval_integer(ad->ad_handle, "_UID", &val);

			if (ACPI_SUCCESS(rv) && ci->ci_acpiid == val)
				goto out;
		}
	}

	return 0;

out:
	if (ptr != NULL)
		*ptr = ad;

	return 10;
}

static uint32_t
acpicpu_cap(struct acpicpu_softc *sc)
{
	uint32_t flags, cap = 0;
	const char *str;
	ACPI_STATUS rv;

	/*
	 * Query and set machine-dependent capabilities.
	 * Note that the Intel-specific _PDC method was
	 * deprecated in ACPI 3.0 in favor of _OSC.
	 */
	flags = acpicpu_md_cap();
	rv = acpicpu_cap_osc(sc, flags, &cap);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
		str = "_OSC";
		goto fail;
	}

	rv = acpicpu_cap_pdc(sc, flags);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
		str = "_PDC";
		goto fail;
	}

	if (cap == 0)
		cap = flags;

	return cap;

fail:
	aprint_error_dev(sc->sc_dev, "failed to evaluate "
	    "%s: %s\n", str, AcpiFormatException(rv));

	return 0;
}

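/*
 * Evaluate the legacy _PDC method with the capability
 * bits obtained from the machine-dependent layer.
 */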
static ACPI_STATUS
acpicpu_cap_pdc(struct acpicpu_softc *sc, uint32_t flags)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj;
	uint32_t cap[3];

	arg.Count = 1;
	arg.Pointer = &obj;

	cap[0] = ACPICPU_PDC_REVID;
	cap[1] = 1;
	cap[2] = flags;

	obj.Type = ACPI_TYPE_BUFFER;
	obj.Buffer.Length = sizeof(cap);
	obj.Buffer.Pointer = (void *)cap;

	return AcpiEvaluateObject(sc->sc_node->ad_handle, "_PDC", &arg, NULL);
}

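/*
 * Negotiate capabilities with the _OSC method, first with the
 * query flag set and then, if the query succeeds, without it.
 */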
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;

	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 * _OSC with the Query Support Flag set until _OSC
	 * returns the Capabilities Masked bit clear, to
	 * negotiate the set of features to be granted to
	 * the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on a per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

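/*
 * ACPI notify handler. The actual work is deferred to a
 * state-specific callback that runs via AcpiOsExecute().
 */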
static void
acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if (sc->sc_cold != false)
		return;

	if (acpicpu_dynamic != true)
		return;

	switch (evt) {

	case ACPICPU_C_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
			return;

		func = acpicpu_cstate_callback;
		break;

	case ACPICPU_P_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
			return;

		func = acpicpu_pstate_callback;
		break;

	case ACPICPU_T_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
			return;

		func = acpicpu_tstate_callback;
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
		return;
	}

	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
}

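/*
 * Suspend hook: quiesce the C-, P-, and T-state machinery and
 * mark the CPU as cold so that notifications are ignored.
 */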
static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	sc->sc_cold = true;

	return true;
}

static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_resume(self);

	return true;
}

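/*
 * Attach an event counter for each valid C-, P-, and T-state;
 * these appear in the output of vmstat -e.
 */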
static void
acpicpu_evcnt_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	const char *str;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method == 0)
			continue;

		str = "HALT";

		if (cs->cs_method == ACPICPU_C_STATE_FFH)
			str = "MWAIT";

		if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
			str = "I/O";

		(void)snprintf(cs->cs_name, sizeof(cs->cs_name),
		    "C%d (%s)", i, str);

		evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), cs->cs_name);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent == 0)
			continue;

		(void)snprintf(ts->ts_name, sizeof(ts->ts_name),
		    "T%u (%u %%)", i, ts->ts_percent);

		evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ts->ts_name);
	}
}

static void
acpicpu_evcnt_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method != 0)
			evcnt_detach(&cs->cs_evcnt);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent != 0)
			evcnt_detach(&ts->ts_evcnt);
	}
}

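/*
 * Print the supported states and their properties once with
 * aprint_verbose, and the per-CPU details with aprint_debug.
 */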
static void
acpicpu_debug_print(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_ci;
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	static bool once = false;
	struct acpicpu_dep *dep;
	uint32_t i, method;

	if (once != true) {

		for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

			cs = &sc->sc_cstate[i];

			if (cs->cs_method == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "C%d: %3s, "
			    "lat %3u us, pow %5u mW, %s\n", i,
			    acpicpu_debug_print_method(cs->cs_method),
			    cs->cs_latency, cs->cs_power,
			    (cs->cs_flags != 0) ? "bus master check" : "");
		}

		method = sc->sc_pstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_pstate_count; i++) {

			ps = &sc->sc_pstate[i];

			if (ps->ps_freq == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
			    "lat %3u us, pow %5u mW, %4u MHz\n", i,
			    acpicpu_debug_print_method(method),
			    ps->ps_latency, ps->ps_power, ps->ps_freq);
		}

		method = sc->sc_tstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_tstate_count; i++) {

			ts = &sc->sc_tstate[i];

			if (ts->ts_percent == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "T%u: %3s, "
			    "lat %3u us, pow %5u mW, %3u %%\n", i,
			    acpicpu_debug_print_method(method),
			    ts->ts_latency, ts->ts_power, ts->ts_percent);
		}

		once = true;
	}

	aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, "
	    "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid,
	    (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags);

	if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {

		dep = &sc->sc_cstate_dep;

		aprint_debug_dev(sc->sc_dev, "C-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {

		dep = &sc->sc_pstate_dep;

		aprint_debug_dev(sc->sc_dev, "P-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {

		dep = &sc->sc_tstate_dep;

		aprint_debug_dev(sc->sc_dev, "T-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}
}

static const char *
acpicpu_debug_print_method(uint8_t val)
{

	switch (val) {

	case ACPICPU_C_STATE_HALT:
		return "HLT";

	case ACPICPU_C_STATE_FFH:
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		return "FFH";

	case ACPICPU_C_STATE_SYSIO:	/* ACPI_ADR_SPACE_SYSTEM_IO */
		return "I/O";

	default:
		return "???";
	}
}

static const char *
acpicpu_debug_print_dep(uint32_t val)
{

	switch (val) {

	case ACPICPU_DEP_SW_ALL:
		return "SW_ALL";

	case ACPICPU_DEP_SW_ANY:
		return "SW_ANY";

	case ACPICPU_DEP_HW_ALL:
		return "HW_ALL";

	default:
		return "unknown";
	}
}

MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

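/*
 * Module glue: when built as a module, initialize and finalize
 * the autoconfiguration data generated in ioconf.c.
 */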
static int
acpicpu_modcmd(modcmd_t cmd, void *aux)
{
	int rv = 0;

	switch (cmd) {

	case MODULE_CMD_INIT:

#ifdef _MODULE
		rv = config_init_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	case MODULE_CMD_FINI:

#ifdef _MODULE
		rv = config_fini_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	default:
		rv = ENOTTY;
	}

	return rv;
}