/* $NetBSD: acpi_cpu.c,v 1.45 2011/10/18 05:08:24 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen (at) iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.45 2011/10/18 05:08:24 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/cpufreq.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#include <machine/acpi_machdep.h>
#include <machine/cpuvar.h>

#define _COMPONENT		ACPI_BUS_COMPONENT
ACPI_MODULE_NAME		("acpi_cpu")

static int		  acpicpu_match(device_t, cfdata_t, void *);
static void		  acpicpu_attach(device_t, device_t, void *);
static int		  acpicpu_detach(device_t, int);
static int		  acpicpu_once_attach(void);
static int		  acpicpu_once_detach(void);
static void		  acpicpu_start(device_t);
static void		  acpicpu_sysctl(device_t);

static ACPI_STATUS	  acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static uint32_t		  acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	  acpicpu_cap_osc(struct acpicpu_softc *,
					  uint32_t, uint32_t *);
static void		  acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		  acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		  acpicpu_resume(device_t, const pmf_qual_t *);
static void		  acpicpu_evcnt_attach(device_t);
static void		  acpicpu_evcnt_detach(device_t);
static void		  acpicpu_debug_print(device_t);
static const char	 *acpicpu_debug_print_method_c(uint8_t);
static const char	 *acpicpu_debug_print_method_pt(uint8_t);
static const char	 *acpicpu_debug_print_dep(uint32_t);

static uint32_t		  acpicpu_count = 0;
struct acpicpu_softc	**acpicpu_sc = NULL;
static struct sysctllog	 *acpicpu_log = NULL;
static bool		  acpicpu_dynamic = true;
static bool		  acpicpu_passive = true;
static const struct {
	const char		*manu;
	const char		*prod;
	const char		*vers;
} acpicpu_quirks[] = {
	{ "Supermicro", "PDSMi-LN4", "0123456789" },
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);

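/*
 * Probe: reject quirked platforms, then require both a
 * machine-dependent match and a matching ACPI processor
 * node for the CPU before claiming the device.
 */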
static int
acpicpu_match(device_t parent, cfdata_t match, void *aux)
{
	const char *manu, *prod, *vers;
	struct cpu_info *ci;
	size_t i;

	if (acpi_softc == NULL)
		return 0;

	manu = pmf_get_platform("system-manufacturer");
	prod = pmf_get_platform("system-product-name");
	vers = pmf_get_platform("system-version");

	if (manu != NULL && prod != NULL && vers != NULL) {

		for (i = 0; i < __arraycount(acpicpu_quirks); i++) {

			if (strcasecmp(acpicpu_quirks[i].manu, manu) == 0 &&
			    strcasecmp(acpicpu_quirks[i].prod, prod) == 0 &&
			    strcasecmp(acpicpu_quirks[i].vers, vers) == 0)
				return 0;
		}
	}

	ci = acpicpu_md_match(parent, match, aux);

	if (ci == NULL)
		return 0;

	if (acpi_match_cpu_info(ci) == NULL)
		return 0;

	return 10;
}

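/*
 * Attach: bind the softc to the CPU and its ACPI node, allocate the
 * shared global state on the first call, and hook up the C-, P-, and
 * T-state support. The actual startup is deferred to acpicpu_start().
 */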
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci;
	ACPI_HANDLE hdl;
	cpuid_t id;
	int rv;

	ci = acpicpu_md_attach(parent, self, aux);

	if (ci == NULL)
		return;

	sc->sc_ci = ci;
	sc->sc_dev = self;
	sc->sc_cold = true;

	hdl = acpi_match_cpu_info(ci);

	if (hdl == NULL) {
		aprint_normal(": failed to match processor\n");
		return;
	}

	sc->sc_node = acpi_match_node(hdl);

	if (acpicpu_once_attach() != 0) {
		aprint_normal(": failed to initialize\n");
		return;
	}

	KASSERT(acpi_softc != NULL);
	KASSERT(acpicpu_sc != NULL);
	KASSERT(sc->sc_node != NULL);

	id = sc->sc_ci->ci_acpiid;

	if (acpicpu_sc[id] != NULL) {
		aprint_normal(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);

	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(self, "failed to obtain CPU object\n");

	acpicpu_count++;
	acpicpu_sc[id] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags = acpicpu_md_flags();

	KASSERT(acpicpu_count <= sc->sc_ncpus);
	KASSERT(sc->sc_node->ad_device == NULL);

	sc->sc_node->ad_device = self;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	acpicpu_debug_print(self);
	acpicpu_evcnt_attach(self);

	(void)config_interrupts(self, acpicpu_start);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}

static int
acpicpu_detach(device_t self, int flags)
{
	struct acpicpu_softc *sc = device_private(self);

	sc->sc_cold = true;

	acpicpu_evcnt_detach(self);
	acpi_deregister_notify(sc->sc_node);

	acpicpu_cstate_detach(self);
	acpicpu_pstate_detach(self);
	acpicpu_tstate_detach(self);

	mutex_destroy(&sc->sc_mtx);
	sc->sc_node->ad_device = NULL;

	acpicpu_count--;
	acpicpu_once_detach();

	return 0;
}

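/*
 * Allocate the global array of softc pointers, indexed by the ACPI
 * id of each CPU, when the first device attaches. The counterpart
 * acpicpu_once_detach() releases it again once the last device has
 * detached.
 */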
static int
acpicpu_once_attach(void)
{
	struct acpicpu_softc *sc;
	unsigned int i;

	if (acpicpu_count != 0)
		return 0;

	KASSERT(acpicpu_sc == NULL);
	KASSERT(acpicpu_log == NULL);

	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);

	if (acpicpu_sc == NULL)
		return ENOMEM;

	for (i = 0; i < maxcpus; i++)
		acpicpu_sc[i] = NULL;

	return 0;
}

static int
acpicpu_once_detach(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;

	if (acpicpu_count != 0)
		return EDEADLK;

	if (acpicpu_log != NULL)
		sysctl_teardown(&acpicpu_log);

	if (acpicpu_sc != NULL) {

		sc = acpicpu_sc[ci->ci_acpiid];

		if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
			cpufreq_deregister();

		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));
	}

	return 0;
}

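/*
 * Deferred startup, scheduled via config_interrupts(9) from
 * acpicpu_attach(). Only the last invocation, once all ACPI
 * CPUs have attached, performs the real work; see the
 * comments below.
 */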
static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static uint32_t count = 0;
	struct cpufreq cf;
	uint32_t i;

	/*
	 * Run the state-specific initialization routines. These
	 * must run only once, after interrupts have been enabled,
	 * all CPUs are running, and all ACPI CPUs have attached.
	 */
	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
		sc->sc_cold = false;
		return;
	}

	/*
	 * Set the last ACPI CPU as non-cold
	 * only after C-states are enabled.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	acpicpu_sysctl(self);
	aprint_debug_dev(self, "ACPI CPUs started\n");

	/*
	 * Register with cpufreq(9).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) {

		(void)memset(&cf, 0, sizeof(struct cpufreq));

		cf.cf_mp = false;
		cf.cf_cookie = NULL;
		cf.cf_get_freq = acpicpu_pstate_get;
		cf.cf_set_freq = acpicpu_pstate_set;
		cf.cf_state_count = sc->sc_pstate_count;

		(void)strlcpy(cf.cf_name, "acpicpu", sizeof(cf.cf_name));

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			cf.cf_state[i].cfs_freq = sc->sc_pstate[i].ps_freq;
			cf.cf_state[i].cfs_power = sc->sc_pstate[i].ps_power;
		}

		if (cpufreq_register(&cf) != 0)
			aprint_error_dev(self, "failed to register cpufreq\n");
	}
}

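/*
 * Create the hw.acpi.cpu sysctl tree with the boolean "dynamic"
 * and "passive" knobs, backed by the file-scope variables above.
 */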
static void
acpicpu_sysctl(device_t self)
{
	const struct sysctlnode *node;
	int err;

	KASSERT(acpicpu_log == NULL);

	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
}

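/*
 * Evaluate the ACPI processor object itself and record the
 * processor id as well as the address and length of the
 * optional P_BLK (processor register block).
 */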
static ACPI_STATUS
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static uint32_t
acpicpu_cap(struct acpicpu_softc *sc)
{
	uint32_t flags, cap = 0;
	ACPI_STATUS rv;

	/*
	 * Query and set machine-dependent capabilities.
	 * Note that the Intel-specific _PDC method has
	 * already been evaluated. It was furthermore
	 * deprecated in ACPI 3.0 in favor of _OSC.
	 */
	flags = acpi_md_pdc();
	rv = acpicpu_cap_osc(sc, flags, &cap);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {

		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "_OSC: %s\n", AcpiFormatException(rv));
	}

	return (cap != 0) ? cap : flags;
}

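/*
 * Negotiate processor capabilities with the firmware by evaluating
 * _OSC with the Intel-defined processor UUID. The two-element
 * capability buffer carries the query flag and the capability bits;
 * on success the granted capabilities are returned via 'val'.
 */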
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;

	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 * _OSC with the Query Support Flag set until _OSC
	 * returns the Capabilities Masked bit clear, to
	 * negotiate the set of features to be granted to
	 * the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on a per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

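/*
 * ACPI notify handler. The state-specific callback is dispatched
 * via AcpiOsExecute() rather than run directly in the notify
 * context. Notifications are ignored while the device is cold or
 * when dynamic state changes have been disabled via sysctl.
 */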
static void
acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if (sc->sc_cold != false)
		return;

	if (acpicpu_dynamic != true)
		return;

	switch (evt) {

	case ACPICPU_C_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
			return;

		func = acpicpu_cstate_callback;
		break;

	case ACPICPU_P_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
			return;

		func = acpicpu_pstate_callback;
		break;

	case ACPICPU_T_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
			return;

		func = acpicpu_tstate_callback;
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
		return;
	}

	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
}

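/*
 * pmf(9) suspend and resume hooks. Suspension runs synchronously,
 * whereas the resume handlers are deferred via AcpiOsExecute().
 */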
static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	sc->sc_cold = true;

	return true;
}

static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);
	static const int handler = OSL_NOTIFY_HANDLER;

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)AcpiOsExecute(handler, acpicpu_cstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)AcpiOsExecute(handler, acpicpu_pstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)AcpiOsExecute(handler, acpicpu_tstate_resume, self);

	return true;
}

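/*
 * Attach an event counter, visible with vmstat -e, for each
 * usable C-, P-, and T-state.
 */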
static void
acpicpu_evcnt_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	const char *str;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method == 0)
			continue;

		str = "HALT";

		if (cs->cs_method == ACPICPU_C_STATE_FFH)
			str = "MWAIT";

		if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
			str = "I/O";

		(void)snprintf(cs->cs_name, sizeof(cs->cs_name),
		    "C%d (%s)", i, str);

		evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), cs->cs_name);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent == 0)
			continue;

		(void)snprintf(ts->ts_name, sizeof(ts->ts_name),
		    "T%u (%u %%)", i, ts->ts_percent);

		evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ts->ts_name);
	}
}

static void
acpicpu_evcnt_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method != 0)
			evcnt_detach(&cs->cs_evcnt);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent != 0)
			evcnt_detach(&ts->ts_evcnt);
	}
}

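/*
 * Print (verbose boot) a one-time summary of the detected states,
 * followed by per-CPU debug output about the capabilities and the
 * coordination ("dependency") information.
 */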
static void
acpicpu_debug_print(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_ci;
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	static bool once = false;
	struct acpicpu_dep *dep;
	uint32_t i, method;

	if (once != true) {

		for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

			cs = &sc->sc_cstate[i];

			if (cs->cs_method == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "C%d: %3s, "
			    "lat %3u us, pow %5u mW%s\n", i,
			    acpicpu_debug_print_method_c(cs->cs_method),
			    cs->cs_latency, cs->cs_power,
			    (cs->cs_flags != 0) ? ", bus master check" : "");
		}

		method = sc->sc_pstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_pstate_count; i++) {

			ps = &sc->sc_pstate[i];

			if (ps->ps_freq == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
			    "lat %3u us, pow %5u mW, %4u MHz%s\n", i,
			    acpicpu_debug_print_method_pt(method),
			    ps->ps_latency, ps->ps_power, ps->ps_freq,
			    (ps->ps_flags & ACPICPU_FLAG_P_TURBO) != 0 ?
			    ", turbo boost" : "");
		}

		method = sc->sc_tstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_tstate_count; i++) {

			ts = &sc->sc_tstate[i];

			if (ts->ts_percent == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "T%u: %3s, "
			    "lat %3u us, pow %5u mW, %3u %%\n", i,
			    acpicpu_debug_print_method_pt(method),
			    ts->ts_latency, ts->ts_power, ts->ts_percent);
		}

		once = true;
	}

	aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, "
	    "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid,
	    (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags);

	if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {

		dep = &sc->sc_cstate_dep;

		aprint_debug_dev(sc->sc_dev, "C-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {

		dep = &sc->sc_pstate_dep;

		aprint_debug_dev(sc->sc_dev, "P-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {

		dep = &sc->sc_tstate_dep;

		aprint_debug_dev(sc->sc_dev, "T-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}
}

static const char *
acpicpu_debug_print_method_c(uint8_t val)
{

	if (val == ACPICPU_C_STATE_FFH)
		return "FFH";

	if (val == ACPICPU_C_STATE_HALT)
		return "HLT";

	if (val == ACPICPU_C_STATE_SYSIO)
		return "I/O";

	return "???";
}

static const char *
acpicpu_debug_print_method_pt(uint8_t val)
{

	if (val == ACPI_ADR_SPACE_SYSTEM_IO)
		return "I/O";

	if (val == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return "FFH";

	return "???";
}

static const char *
acpicpu_debug_print_dep(uint32_t val)
{

	switch (val) {

	case ACPICPU_DEP_SW_ALL:
		return "SW_ALL";

	case ACPICPU_DEP_SW_ANY:
		return "SW_ANY";

	case ACPICPU_DEP_HW_ALL:
		return "HW_ALL";

	default:
		return "unknown";
	}
}

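/*
 * Loadable kernel module glue.
 */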
MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
acpicpu_modcmd(modcmd_t cmd, void *aux)
{
	int rv = 0;

	switch (cmd) {

	case MODULE_CMD_INIT:

#ifdef _MODULE
		rv = config_init_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	case MODULE_CMD_FINI:

#ifdef _MODULE
		rv = config_fini_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	default:
		rv = ENOTTY;
	}

	return rv;
}