/* $NetBSD: acpi_cpu.c,v 1.30 2011/02/27 17:10:33 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.30 2011/02/27 17:10:33 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#include <machine/acpi_machdep.h>
#include <machine/cpuvar.h>

#define _COMPONENT		ACPI_BUS_COMPONENT
ACPI_MODULE_NAME		("acpi_cpu")

static int		acpicpu_match(device_t, cfdata_t, void *);
static void		acpicpu_attach(device_t, device_t, void *);
static int		acpicpu_detach(device_t, int);
static int		acpicpu_once_attach(void);
static int		acpicpu_once_detach(void);
static void		acpicpu_start(device_t);
static void		acpicpu_debug_print(device_t);
static const char	*acpicpu_debug_print_dep(uint32_t);
static void		acpicpu_sysctl(device_t);

static ACPI_STATUS	acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static int		acpicpu_find(struct cpu_info *,
				     struct acpi_devnode **);
static uint32_t		acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cap_pdc(struct acpicpu_softc *, uint32_t);
static ACPI_STATUS	acpicpu_cap_osc(struct acpicpu_softc *,
					uint32_t, uint32_t *);
static void		acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		acpicpu_resume(device_t, const pmf_qual_t *);

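/*
 * Module-wide state: the number of attached ACPI CPUs, a table of
 * softc pointers indexed by the ACPI CPU ID, the sysctl log, and the
 * two boolean knobs exported through acpicpu_sysctl() below.
 */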
static uint32_t		acpicpu_count = 0;
struct acpicpu_softc	**acpicpu_sc = NULL;
static struct sysctllog	*acpicpu_log = NULL;
static bool		acpicpu_dynamic = true;
static bool		acpicpu_passive = true;

static const char * const acpicpu_hid[] = {
	"ACPI0007",
	NULL
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);

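/*
 * Match only when the parent passes the "frequency" capability and
 * the CPU can be located in the ACPI namespace (see acpicpu_find()).
 */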
static int
acpicpu_match(device_t parent, cfdata_t match, void *aux)
{
	struct cpufeature_attach_args *cfaa = aux;

	if (acpi_softc == NULL)
		return 0;

	if (strcmp(cfaa->name, "frequency") != 0)
		return 0;

	return acpicpu_find(cfaa->ci, NULL);
}

static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpufeature_attach_args *cfaa = aux;
	struct cpu_info *ci = cfaa->ci;
	cpuid_t id;
	int rv;

	sc->sc_ci = ci;
	sc->sc_dev = self;
	sc->sc_cold = true;
	sc->sc_node = NULL;

	rv = acpicpu_find(ci, &sc->sc_node);

	if (rv == 0) {
		aprint_normal(": failed to match processor\n");
		return;
	}

	if (acpicpu_once_attach() != 0) {
		aprint_normal(": failed to initialize\n");
		return;
	}

	KASSERT(acpi_softc != NULL);
	KASSERT(acpicpu_sc != NULL);
	KASSERT(sc->sc_node != NULL);

	id = sc->sc_ci->ci_acpiid;

	if (acpicpu_sc[id] != NULL) {
		aprint_normal(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);

	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(self, "failed to obtain CPU object\n");

	acpicpu_count++;
	acpicpu_sc[id] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags = acpicpu_md_flags();

	KASSERT(acpicpu_count <= sc->sc_ncpus);
	KASSERT(sc->sc_node->ad_device == NULL);

	sc->sc_node->ad_device = self;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	acpicpu_debug_print(self);

	(void)config_interrupts(self, acpicpu_start);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}

static int
acpicpu_detach(device_t self, int flags)
{
	struct acpicpu_softc *sc = device_private(self);
	int rv = 0;

	sc->sc_cold = true;
	acpi_deregister_notify(sc->sc_node);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		rv = acpicpu_cstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		rv = acpicpu_pstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		rv = acpicpu_tstate_detach(self);

	if (rv != 0)
		return rv;

	mutex_destroy(&sc->sc_mtx);
	sc->sc_node->ad_device = NULL;

	acpicpu_count--;
	acpicpu_once_detach();

	return 0;
}

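/*
 * Allocate the global softc table when the first device attaches.
 * The table is released again by acpicpu_once_detach() once the
 * last device has detached.
 */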
static int
acpicpu_once_attach(void)
{
	struct acpicpu_softc *sc;
	unsigned int i;

	if (acpicpu_count != 0)
		return 0;

	KASSERT(acpicpu_sc == NULL);
	KASSERT(acpicpu_log == NULL);

	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);

	if (acpicpu_sc == NULL)
		return ENOMEM;

	for (i = 0; i < maxcpus; i++)
		acpicpu_sc[i] = NULL;

	return 0;
}

static int
acpicpu_once_detach(void)
{
	struct acpicpu_softc *sc;

	if (acpicpu_count != 0)
		return EDEADLK;

	if (acpicpu_log != NULL)
		sysctl_teardown(&acpicpu_log);

	if (acpicpu_sc != NULL)
		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));

	return 0;
}

static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static uint32_t count = 0;

	/*
	 * Run the state-specific initialization routines. These
	 * must run only once, after interrupts have been enabled,
	 * all CPUs are running, and all ACPI CPUs have attached.
	 */
	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
		sc->sc_cold = false;
		return;
	}

	/*
	 * Set the last ACPI CPU as non-cold
	 * only after C-states are enabled.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	acpicpu_sysctl(self);
	aprint_debug_dev(self, "ACPI CPUs started\n");
}

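/*
 * Create the sysctl tree under hw.acpi.cpu with two writable knobs:
 * "dynamic" (whether notifications may change states at run-time,
 * cf. acpicpu_notify()) and "passive" (passive cooling).
 */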
static void
acpicpu_sysctl(device_t self)
{
	const struct sysctlnode *node;
	int err;

	KASSERT(acpicpu_log == NULL);

	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
}

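/*
 * Evaluate the Processor() object behind the given handle and record
 * the processor ID and the address and length of the P_BLK, if any.
 */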
static ACPI_STATUS
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

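/*
 * Find the ACPI device node that corresponds to the given cpu_info.
 * Returns a non-zero match priority on success and zero on failure;
 * the priority is chosen to beat est(4) and powernow(4).
 */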
static int
acpicpu_find(struct cpu_info *ci, struct acpi_devnode **ptr)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpicpu_object ao;
	struct acpi_devnode *ad;
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	if (sc == NULL || acpi_active == 0)
		return 0;

	/*
	 * CPUs are declared in the ACPI namespace
	 * either as a Processor() or as a Device().
	 * In both cases the MADT entries are used
	 * for the match (see ACPI 4.0, section 8.4).
	 */
	SIMPLEQ_FOREACH(ad, &sc->ad_head, ad_list) {

		if (ad->ad_type == ACPI_TYPE_PROCESSOR) {

			rv = acpicpu_object(ad->ad_handle, &ao);

			if (ACPI_SUCCESS(rv) && ci->ci_acpiid == ao.ao_procid)
				goto out;
		}

		if (acpi_match_hid(ad->ad_devinfo, acpicpu_hid) != 0) {

			rv = acpi_eval_integer(ad->ad_handle, "_UID", &val);

			if (ACPI_SUCCESS(rv) && ci->ci_acpiid == val)
				goto out;
		}
	}

	return 0;

out:
	if (ptr != NULL)
		*ptr = ad;

	return 10;	/* Beat est(4) and powernow(4). */
}

static uint32_t
acpicpu_cap(struct acpicpu_softc *sc)
{
	uint32_t flags, cap = 0;
	const char *str;
	ACPI_STATUS rv;

	/*
	 * Query and set machine-dependent capabilities.
	 * Note that the Intel-specific _PDC method was
	 * deprecated in ACPI 3.0 in favor of _OSC.
	 */
	flags = acpicpu_md_cap();
	rv = acpicpu_cap_osc(sc, flags, &cap);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
		str = "_OSC";
		goto fail;
	}

	rv = acpicpu_cap_pdc(sc, flags);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
		str = "_PDC";
		goto fail;
	}

	if (cap == 0)
		cap = flags;

	return cap;

fail:
	aprint_error_dev(sc->sc_dev, "failed to evaluate "
	    "%s: %s\n", str, AcpiFormatException(rv));

	return 0;
}

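/*
 * Evaluate the deprecated _PDC method. The single argument is a
 * buffer of three DWORDs: the revision ID, the number of capability
 * DWORDs that follow (one), and the capability flags.
 */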
static ACPI_STATUS
acpicpu_cap_pdc(struct acpicpu_softc *sc, uint32_t flags)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj;
	uint32_t cap[3];

	arg.Count = 1;
	arg.Pointer = &obj;

	cap[0] = ACPICPU_PDC_REVID;
	cap[1] = 1;
	cap[2] = flags;

	obj.Type = ACPI_TYPE_BUFFER;
	obj.Buffer.Length = sizeof(cap);
	obj.Buffer.Pointer = (void *)cap;

	return AcpiEvaluateObject(sc->sc_node->ad_handle, "_PDC", &arg, NULL);
}

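/*
 * Evaluate _OSC with the Intel UUID. The method is first evaluated
 * with the query flag set in order to negotiate the capabilities,
 * and then re-evaluated without the flag to commit to the negotiated
 * set (see also the quoted recommendation below).
 */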
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;

	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 *  _OSC with the Query Support Flag set until _OSC
	 *  returns the Capabilities Masked bit clear, to
	 *  negotiate the set of features to be granted to
	 *  the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on a per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

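/*
 * Handle ACPI notifications by dispatching the state-specific
 * callback via AcpiOsExecute(). Notifications are ignored while
 * the driver is cold and when dynamic state changes have been
 * disabled through the sysctl interface.
 */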
static void
acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if (sc->sc_cold != false)
		return;

	if (acpicpu_dynamic != true)
		return;

	switch (evt) {

	case ACPICPU_C_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
			return;

		func = acpicpu_cstate_callback;
		break;

	case ACPICPU_P_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
			return;

		func = acpicpu_pstate_callback;
		break;

	case ACPICPU_T_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
			return;

		func = acpicpu_tstate_callback;
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
		return;
	}

	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
}

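/*
 * Power management hooks: mark the CPU as cold across a suspend so
 * that, among other things, notifications are ignored (see
 * acpicpu_notify()) until acpicpu_resume() has run.
 */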
static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	sc->sc_cold = true;

	return true;
}

static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_resume(self);

	return true;
}

static void
acpicpu_debug_print(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_ci;
	struct acpicpu_dep *dep;

	aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, "
	    "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid,
	    (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags);

	if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {

		dep = &sc->sc_cstate_dep;

		aprint_debug_dev(sc->sc_dev, "C-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {

		dep = &sc->sc_pstate_dep;

		aprint_debug_dev(sc->sc_dev, "P-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {

		dep = &sc->sc_tstate_dep;

		aprint_debug_dev(sc->sc_dev, "T-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}
}

static const char *
acpicpu_debug_print_dep(uint32_t val)
{

	switch (val) {

	case ACPICPU_DEP_SW_ALL:
		return "SW_ALL";

	case ACPICPU_DEP_SW_ANY:
		return "SW_ANY";

	case ACPICPU_DEP_HW_ALL:
		return "HW_ALL";

	default:
		return "unknown";
	}
}

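/*
 * Module glue: when built as a module, initialize and tear down the
 * autoconfiguration data from the generated ioconf.c.
 */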
MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
acpicpu_modcmd(modcmd_t cmd, void *aux)
{
	int rv = 0;

	switch (cmd) {

	case MODULE_CMD_INIT:

#ifdef _MODULE
		rv = config_init_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	case MODULE_CMD_FINI:

#ifdef _MODULE
		rv = config_fini_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	default:
		rv = ENOTTY;
	}

	return rv;
}