acpi_cpu.c revision 1.28 1 /* $NetBSD: acpi_cpu.c,v 1.28 2011/02/25 12:08:35 jruoho Exp $ */
2
3 /*-
4 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen (at) iki.fi>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.28 2011/02/25 12:08:35 jruoho Exp $");
31
32 #include <sys/param.h>
33 #include <sys/cpu.h>
34 #include <sys/kernel.h>
35 #include <sys/kmem.h>
36 #include <sys/module.h>
37 #include <sys/mutex.h>
38 #include <sys/once.h>
39 #include <sys/sysctl.h>
40
41 #include <dev/acpi/acpireg.h>
42 #include <dev/acpi/acpivar.h>
43 #include <dev/acpi/acpi_cpu.h>
44
45 #include <machine/acpi_machdep.h>
46
47 #define _COMPONENT ACPI_BUS_COMPONENT
48 ACPI_MODULE_NAME ("acpi_cpu")
49
50 static int acpicpu_match(device_t, cfdata_t, void *);
51 static void acpicpu_attach(device_t, device_t, void *);
52 static int acpicpu_detach(device_t, int);
53 static int acpicpu_once_attach(void);
54 static int acpicpu_once_detach(void);
55 static void acpicpu_prestart(device_t);
56 static void acpicpu_start(device_t);
57 static void acpicpu_sysctl(device_t);
58
59 static int acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
60 static struct cpu_info *acpicpu_ci(uint32_t);
61 static uint32_t acpicpu_cap(struct acpicpu_softc *);
62 static ACPI_STATUS acpicpu_cap_pdc(struct acpicpu_softc *, uint32_t);
63 static ACPI_STATUS acpicpu_cap_osc(struct acpicpu_softc *,
64 uint32_t, uint32_t *);
65 static void acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
66 static bool acpicpu_suspend(device_t, const pmf_qual_t *);
67 static bool acpicpu_resume(device_t, const pmf_qual_t *);
68
69 extern uint32_t acpi_cpus;
70 struct acpicpu_softc **acpicpu_sc = NULL;
71 static struct sysctllog *acpicpu_log = NULL;
72 static bool acpicpu_dynamic = true;
73 static bool acpicpu_passive = true;
74
75 static const char * const acpicpu_hid[] = {
76 "ACPI0007",
77 NULL
78 };
79
80 CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
81 acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);
82
83 static int
84 acpicpu_match(device_t parent, cfdata_t match, void *aux)
85 {
86 struct acpi_attach_args *aa = aux;
87 struct acpicpu_object ao;
88 int rv;
89
90 if (aa->aa_node->ad_type != ACPI_TYPE_PROCESSOR)
91 return 0;
92
93 if (acpi_match_hid(aa->aa_node->ad_devinfo, acpicpu_hid) != 0)
94 return 1;
95
96 rv = acpicpu_object(aa->aa_node->ad_handle, &ao);
97
98 if (rv != 0 || acpicpu_ci(ao.ao_procid) == NULL)
99 return 0;
100
101 return 1;
102 }
103
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpi_attach_args *aa = aux;
	static ONCE_DECL(once_attach);
	cpuid_t id;
	int rv;

	/* Evaluate the processor object; bail out silently on failure. */
	rv = acpicpu_object(aa->aa_node->ad_handle, &sc->sc_object);

	if (rv != 0)
		return;

	/* Allocate the global per-CPU softc table on the first attach only. */
	rv = RUN_ONCE(&once_attach, acpicpu_once_attach);

	if (rv != 0)
		return;

	sc->sc_dev = self;
	sc->sc_cold = true;	/* Blocks notify handling until started. */
	sc->sc_node = aa->aa_node;

	/* Map the ACPI processor ID to the matching cpu_info. */
	sc->sc_ci = acpicpu_ci(sc->sc_object.ao_procid);

	if (sc->sc_ci == NULL) {
		aprint_error(": invalid CPU\n");
		return;
	}

	id = sc->sc_ci->ci_acpiid;

	/* Reject duplicate processor objects referring to the same CPU. */
	if (acpicpu_sc[id] != NULL) {
		aprint_error(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	acpi_cpus++;
	acpicpu_sc[id] = sc;

	/* Negotiate capabilities with the firmware (_OSC / _PDC). */
	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_flags |= acpicpu_md_flags();

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	/*
	 * Probe the C-, P- and T-state support; each hook is expected
	 * to set the corresponding ACPICPU_FLAG_* bit on success.
	 */
	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	/* Defer start until all CPUs have attached and interrupts work. */
	(void)config_defer(self, acpicpu_prestart);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}
160
161 static int
162 acpicpu_detach(device_t self, int flags)
163 {
164 struct acpicpu_softc *sc = device_private(self);
165 static ONCE_DECL(once_detach);
166 int rv = 0;
167
168 sc->sc_cold = true;
169 acpi_deregister_notify(sc->sc_node);
170
171 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
172 rv = acpicpu_cstate_detach(self);
173
174 if (rv != 0)
175 return rv;
176
177 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
178 rv = acpicpu_pstate_detach(self);
179
180 if (rv != 0)
181 return rv;
182
183 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
184 rv = acpicpu_tstate_detach(self);
185
186 if (rv != 0)
187 return rv;
188
189 rv = RUN_ONCE(&once_detach, acpicpu_once_detach);
190
191 if (rv != 0)
192 return rv;
193
194 mutex_destroy(&sc->sc_mtx);
195 acpi_cpus--;
196
197 return 0;
198 }
199
200 static int
201 acpicpu_once_attach(void)
202 {
203 struct acpicpu_softc *sc;
204 unsigned int i;
205
206 acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);
207
208 if (acpicpu_sc == NULL)
209 return ENOMEM;
210
211 for (i = 0; i < maxcpus; i++)
212 acpicpu_sc[i] = NULL;
213
214 return 0;
215 }
216
217 static int
218 acpicpu_once_detach(void)
219 {
220 struct acpicpu_softc *sc;
221
222 if (acpicpu_sc != NULL)
223 kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));
224
225 if (acpicpu_log != NULL)
226 sysctl_teardown(&acpicpu_log);
227
228 return 0;
229 }
230
231 static void
232 acpicpu_prestart(device_t self)
233 {
234 struct acpicpu_softc *sc = device_private(self);
235 static bool once = false;
236
237 if (once != false) {
238 sc->sc_cold = false;
239 return;
240 }
241
242 once = true;
243
244 (void)config_interrupts(self, acpicpu_start);
245 }
246
static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	/*
	 * Run the state-specific initialization
	 * routines. These should be called only
	 * once, after interrupts are enabled and
	 * all ACPI CPUs have attached.
	 *
	 * Note: sc_flags is re-read before each call on purpose --
	 * a start routine may modify the flags of the softc.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	/* Create the hw.acpi.cpu sysctl tree (logged once, globally). */
	acpicpu_sysctl(self);

	aprint_debug_dev(sc->sc_dev, "ACPI CPUs started (cap "
	    "0x%02x, flags 0x%06x)\n", sc->sc_cap, sc->sc_flags);

	/* From here on, notify events are acted upon. */
	sc->sc_cold = false;
}
274
275 static void
276 acpicpu_sysctl(device_t self)
277 {
278 const struct sysctlnode *node;
279 int err;
280
281 err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
282 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
283 NULL, 0, NULL, 0, CTL_HW, CTL_EOL);
284
285 if (err != 0)
286 goto fail;
287
288 err = sysctl_createv(&acpicpu_log, 0, &node, &node,
289 CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
290 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
291
292 if (err != 0)
293 goto fail;
294
295 err = sysctl_createv(&acpicpu_log, 0, &node, &node,
296 0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
297 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
298
299 if (err != 0)
300 goto fail;
301
302 err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
303 CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
304 SYSCTL_DESCR("Dynamic states"), NULL, 0,
305 &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);
306
307 if (err != 0)
308 goto fail;
309
310 err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
311 CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
312 SYSCTL_DESCR("Passive cooling"), NULL, 0,
313 &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);
314
315 if (err != 0)
316 goto fail;
317
318 return;
319
320 fail:
321 aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
322 }
323
324 static int
325 acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
326 {
327 ACPI_OBJECT *obj;
328 ACPI_BUFFER buf;
329 ACPI_STATUS rv;
330
331 rv = acpi_eval_struct(hdl, NULL, &buf);
332
333 if (ACPI_FAILURE(rv))
334 return 1;
335
336 obj = buf.Pointer;
337
338 if (obj->Type != ACPI_TYPE_PROCESSOR) {
339 rv = AE_TYPE;
340 goto out;
341 }
342
343 if (obj->Processor.ProcId > (uint32_t)maxcpus) {
344 rv = AE_LIMIT;
345 goto out;
346 }
347
348 KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);
349
350 if (ao != NULL) {
351 ao->ao_procid = obj->Processor.ProcId;
352 ao->ao_pblklen = obj->Processor.PblkLength;
353 ao->ao_pblkaddr = obj->Processor.PblkAddress;
354 }
355
356 out:
357 if (buf.Pointer != NULL)
358 ACPI_FREE(buf.Pointer);
359
360 return ACPI_FAILURE(rv) ? 1 : 0;
361 }
362
363 static struct cpu_info *
364 acpicpu_ci(uint32_t id)
365 {
366 CPU_INFO_ITERATOR cii;
367 struct cpu_info *ci;
368
369 for (CPU_INFO_FOREACH(cii, ci)) {
370
371 if (id == ci->ci_acpiid)
372 return ci;
373 }
374
375 return NULL;
376 }
377
378 static uint32_t
379 acpicpu_cap(struct acpicpu_softc *sc)
380 {
381 uint32_t flags, cap = 0;
382 const char *str;
383 ACPI_STATUS rv;
384
385 /*
386 * Query and set machine-dependent capabilities.
387 * Note that the Intel-specific _PDC method was
388 * deprecated in the ACPI 3.0 in favor of _OSC.
389 */
390 flags = acpicpu_md_cap();
391 rv = acpicpu_cap_osc(sc, flags, &cap);
392
393 if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
394 str = "_OSC";
395 goto fail;
396 }
397
398 rv = acpicpu_cap_pdc(sc, flags);
399
400 if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
401 str = "_PDC";
402 goto fail;
403 }
404
405 if (cap == 0)
406 cap = flags;
407
408 return cap;
409
410 fail:
411 aprint_error_dev(sc->sc_dev, "failed to evaluate "
412 "%s: %s\n", str, AcpiFormatException(rv));
413
414 return 0;
415 }
416
417 static ACPI_STATUS
418 acpicpu_cap_pdc(struct acpicpu_softc *sc, uint32_t flags)
419 {
420 ACPI_OBJECT_LIST arg;
421 ACPI_OBJECT obj;
422 uint32_t cap[3];
423
424 arg.Count = 1;
425 arg.Pointer = &obj;
426
427 cap[0] = ACPICPU_PDC_REVID;
428 cap[1] = 1;
429 cap[2] = flags;
430
431 obj.Type = ACPI_TYPE_BUFFER;
432 obj.Buffer.Length = sizeof(cap);
433 obj.Buffer.Pointer = (void *)cap;
434
435 return AcpiEvaluateObject(sc->sc_node->ad_handle, "_PDC", &arg, NULL);
436 }
437
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;	/* Retry budget for the "capabilities masked" case. */

	/*
	 * Intel vendor UUID used with _OSC for processor capabilities.
	 * NOTE(review): stored in ACPI's mixed-endian UUID byte order --
	 * verify against the spec before touching these bytes.
	 */
	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	/*
	 * First pass runs with the query bit set: nothing is committed,
	 * the firmware only reports what it would grant. cap[1] carries
	 * the capabilities we are asking for.
	 */
	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	/* _OSC(UUID, revision, count, capabilities buffer). */
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	/* Let ACPICA allocate the return buffer; freed below. */
	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	/* The return value must be a buffer of the same two DWORDs. */
	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	/* ptr[0] holds the firmware's error/status bits. */
	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 * _OSC with the Query Support Flag set until _OSC
	 * returns the Capabilities Masked bit clear, to
	 * negotiate the set of features to be granted to
	 * the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	/*
	 * After a successful query pass, evaluate once more with the
	 * query bit cleared to actually commit the capabilities.
	 */
	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
547
548 static void
549 acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
550 {
551 ACPI_OSD_EXEC_CALLBACK func;
552 struct acpicpu_softc *sc;
553 device_t self = aux;
554
555 sc = device_private(self);
556
557 if (sc->sc_cold != false)
558 return;
559
560 if (acpicpu_dynamic != true)
561 return;
562
563 switch (evt) {
564
565 case ACPICPU_C_NOTIFY:
566
567 if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
568 return;
569
570 func = acpicpu_cstate_callback;
571 break;
572
573 case ACPICPU_P_NOTIFY:
574
575 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
576 return;
577
578 func = acpicpu_pstate_callback;
579 break;
580
581 case ACPICPU_T_NOTIFY:
582
583 if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
584 return;
585
586 func = acpicpu_tstate_callback;
587 break;
588
589 default:
590 aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
591 return;
592 }
593
594 (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
595 }
596
static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	/* Suspend each attached state type; failures are ignored. */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	/* Block notify handling until resumed. */
	sc->sc_cold = true;

	return true;
}
615
static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	/* Re-enable notify handling before restoring the states. */
	sc->sc_cold = false;

	/* Resume each attached state type; failures are ignored. */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_resume(self);

	return true;
}
634
635 MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);
636
637 #ifdef _MODULE
638 #include "ioconf.c"
639 #endif
640
641 static int
642 acpicpu_modcmd(modcmd_t cmd, void *aux)
643 {
644 int rv = 0;
645
646 switch (cmd) {
647
648 case MODULE_CMD_INIT:
649
650 #ifdef _MODULE
651 rv = config_init_component(cfdriver_ioconf_acpicpu,
652 cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
653 #endif
654 break;
655
656 case MODULE_CMD_FINI:
657
658 #ifdef _MODULE
659 rv = config_fini_component(cfdriver_ioconf_acpicpu,
660 cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
661 #endif
662 break;
663
664 default:
665 rv = ENOTTY;
666 }
667
668 return rv;
669 }
670