acpi_cpu.c revision 1.21 1 /* $NetBSD: acpi_cpu.c,v 1.21 2010/08/27 02:44:05 jruoho Exp $ */
2
3 /*-
4 * Copyright (c) 2010 Jukka Ruohonen <jruohonen (at) iki.fi>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.21 2010/08/27 02:44:05 jruoho Exp $");
31
32 #include <sys/param.h>
33 #include <sys/cpu.h>
34 #include <sys/kernel.h>
35 #include <sys/kmem.h>
36 #include <sys/module.h>
37 #include <sys/mutex.h>
38 #include <sys/once.h>
39
40 #include <dev/acpi/acpireg.h>
41 #include <dev/acpi/acpivar.h>
42 #include <dev/acpi/acpi_cpu.h>
43
44 #include <machine/acpi_machdep.h>
45
/* ACPICA debug-facility identification for this translation unit. */
#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu")

/* Autoconf glue. */
static int		acpicpu_match(device_t, cfdata_t, void *);
static void		acpicpu_attach(device_t, device_t, void *);
static int		acpicpu_detach(device_t, int);
static int		acpicpu_once_attach(void);
static int		acpicpu_once_detach(void);
static void		acpicpu_prestart(device_t);
static void		acpicpu_start(device_t);

/* ACPI object evaluation and capability negotiation. */
static int		acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static cpuid_t		acpicpu_id(uint32_t);
static uint32_t		acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cap_pdc(struct acpicpu_softc *, uint32_t);
static ACPI_STATUS	acpicpu_cap_osc(struct acpicpu_softc *,
				uint32_t, uint32_t *);
static void		acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		acpicpu_resume(device_t, const pmf_qual_t *);

/*
 * Global table of per-CPU softc pointers, indexed by the ACPI
 * processor ID; allocated in acpicpu_once_attach() to maxcpus
 * entries and freed in acpicpu_once_detach().
 */
struct acpicpu_softc **acpicpu_sc = NULL;

/* _HID values matched in addition to Processor objects. */
static const char * const acpicpu_hid[] = {
	"ACPI0007",
	NULL
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);
76
77 static int
78 acpicpu_match(device_t parent, cfdata_t match, void *aux)
79 {
80 struct acpi_attach_args *aa = aux;
81 struct acpicpu_object ao;
82 int rv;
83
84 if (aa->aa_node->ad_type != ACPI_TYPE_PROCESSOR)
85 return 0;
86
87 if (acpi_match_hid(aa->aa_node->ad_devinfo, acpicpu_hid) != 0)
88 return 1;
89
90 rv = acpicpu_object(aa->aa_node->ad_handle, &ao);
91
92 if (rv != 0 || acpicpu_id(ao.ao_procid) == 0xFFFFFF)
93 return 0;
94
95 return 1;
96 }
97
/*
 * Autoconf attach: evaluate the processor object, register the softc
 * in the global table, negotiate capabilities and hook up the C-, P-
 * and T-state machinery. Failures before the banner is printed abort
 * the attach silently.
 */
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpi_attach_args *aa = aux;
	static ONCE_DECL(once_attach);
	int rv;

	/* Evaluate the processor object into the softc. */
	rv = acpicpu_object(aa->aa_node->ad_handle, &sc->sc_object);

	if (rv != 0)
		return;

	/* Allocate the global per-CPU softc array exactly once. */
	rv = RUN_ONCE(&once_attach, acpicpu_once_attach);

	if (rv != 0)
		return;

	sc->sc_dev = self;
	sc->sc_cold = true;		/* suppress notifies until started */
	sc->sc_mapped = false;
	sc->sc_passive = false;
	sc->sc_iot = aa->aa_iot;
	sc->sc_node = aa->aa_node;
	sc->sc_cpuid = acpicpu_id(sc->sc_object.ao_procid);

	/* 0xFFFFFF is the "no matching CPU" sentinel from acpicpu_id(). */
	if (sc->sc_cpuid == 0xFFFFFF) {
		aprint_error(": invalid CPU ID\n");
		return;
	}

	if (acpicpu_sc[sc->sc_cpuid] != NULL) {
		aprint_error(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	acpicpu_sc[sc->sc_cpuid] = sc;

	/* Negotiate capabilities with the firmware (_OSC / _PDC). */
	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_flags |= acpicpu_md_quirks();

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	/*
	 * We should claim the bus space. However, we do this only
	 * to announce that the space is in use. As is noted in
	 * ichlpcib(4), we can continue our I/O without bus_space(9).
	 */
	if (sc->sc_object.ao_pblklen == 6 && sc->sc_object.ao_pblkaddr != 0) {

		rv = bus_space_map(sc->sc_iot, sc->sc_object.ao_pblkaddr,
		    sc->sc_object.ao_pblklen, 0, &sc->sc_ioh);

		if (rv == 0)
			sc->sc_mapped = true;
	}

	/* Attach the individual state machines; they set sc_flags bits. */
	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	/* Defer start until all CPUs have attached and interrupts work. */
	(void)config_defer(self, acpicpu_prestart);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}
166
167 static int
168 acpicpu_detach(device_t self, int flags)
169 {
170 struct acpicpu_softc *sc = device_private(self);
171 const bus_addr_t addr = sc->sc_object.ao_pblkaddr;
172 static ONCE_DECL(once_detach);
173 int rv = 0;
174
175 sc->sc_cold = true;
176 acpi_deregister_notify(sc->sc_node);
177
178 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
179 rv = acpicpu_cstate_detach(self);
180
181 if (rv != 0)
182 return rv;
183
184 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
185 rv = acpicpu_pstate_detach(self);
186
187 if (rv != 0)
188 return rv;
189
190 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
191 rv = acpicpu_tstate_detach(self);
192
193 if (rv != 0)
194 return rv;
195
196 rv = RUN_ONCE(&once_detach, acpicpu_once_detach);
197
198 if (rv != 0)
199 return rv;
200
201 if (sc->sc_mapped != false)
202 bus_space_unmap(sc->sc_iot, sc->sc_ioh, addr);
203
204 mutex_destroy(&sc->sc_mtx);
205
206 return 0;
207 }
208
209 static int
210 acpicpu_once_attach(void)
211 {
212 struct acpicpu_softc *sc;
213 unsigned int i;
214
215 acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);
216
217 if (acpicpu_sc == NULL)
218 return ENOMEM;
219
220 for (i = 0; i < maxcpus; i++)
221 acpicpu_sc[i] = NULL;
222
223 return 0;
224 }
225
226 static int
227 acpicpu_once_detach(void)
228 {
229 struct acpicpu_softc *sc;
230
231 if (acpicpu_sc != NULL)
232 kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));
233
234 return 0;
235 }
236
237 static void
238 acpicpu_prestart(device_t self)
239 {
240 struct acpicpu_softc *sc = device_private(self);
241 static bool once = false;
242
243 if (once != false) {
244 sc->sc_cold = false;
245 return;
246 }
247
248 once = true;
249
250 (void)config_interrupts(self, acpicpu_start);
251 }
252
253 static void
254 acpicpu_start(device_t self)
255 {
256 struct acpicpu_softc *sc = device_private(self);
257
258 /*
259 * Run the state-specific initialization
260 * routines. These should be called only
261 * once, after interrupts are enabled and
262 * all ACPI CPUs have attached.
263 */
264 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
265 acpicpu_cstate_start(self);
266
267 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
268 acpicpu_pstate_start(self);
269
270 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
271 acpicpu_tstate_start(self);
272
273 aprint_debug_dev(sc->sc_dev, "ACPI CPUs started (cap "
274 "0x%02x, flags 0x%06x)\n", sc->sc_cap, sc->sc_flags);
275
276 sc->sc_cold = false;
277 }
278
279 static int
280 acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
281 {
282 ACPI_OBJECT *obj;
283 ACPI_BUFFER buf;
284 ACPI_STATUS rv;
285
286 rv = acpi_eval_struct(hdl, NULL, &buf);
287
288 if (ACPI_FAILURE(rv))
289 return 1;
290
291 obj = buf.Pointer;
292
293 if (obj->Type != ACPI_TYPE_PROCESSOR) {
294 rv = AE_TYPE;
295 goto out;
296 }
297
298 if (obj->Processor.ProcId > (uint32_t)maxcpus) {
299 rv = AE_LIMIT;
300 goto out;
301 }
302
303 KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);
304
305 if (ao != NULL) {
306 ao->ao_procid = obj->Processor.ProcId;
307 ao->ao_pblklen = obj->Processor.PblkLength;
308 ao->ao_pblkaddr = obj->Processor.PblkAddress;
309 }
310
311 out:
312 if (buf.Pointer != NULL)
313 ACPI_FREE(buf.Pointer);
314
315 return ACPI_FAILURE(rv) ? 1 : 0;
316 }
317
318 static cpuid_t
319 acpicpu_id(uint32_t id)
320 {
321 CPU_INFO_ITERATOR cii;
322 struct cpu_info *ci;
323
324 for (CPU_INFO_FOREACH(cii, ci)) {
325
326 if (id == ci->ci_acpiid)
327 return id;
328 }
329
330 return 0xFFFFFF;
331 }
332
333 static uint32_t
334 acpicpu_cap(struct acpicpu_softc *sc)
335 {
336 uint32_t flags, cap = 0;
337 const char *str;
338 ACPI_STATUS rv;
339
340 /*
341 * Query and set machine-dependent capabilities.
342 * Note that the Intel-specific _PDC method was
343 * deprecated in the ACPI 3.0 in favor of _OSC.
344 */
345 flags = acpicpu_md_cap();
346 rv = acpicpu_cap_osc(sc, flags, &cap);
347
348 if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
349 str = "_OSC";
350 goto fail;
351 }
352
353 rv = acpicpu_cap_pdc(sc, flags);
354
355 if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {
356 str = "_PDC";
357 goto fail;
358 }
359
360 if (cap == 0)
361 cap = flags;
362
363 return cap;
364
365 fail:
366 aprint_error_dev(sc->sc_dev, "failed to evaluate "
367 "%s: %s\n", str, AcpiFormatException(rv));
368
369 return 0;
370 }
371
372 static ACPI_STATUS
373 acpicpu_cap_pdc(struct acpicpu_softc *sc, uint32_t flags)
374 {
375 ACPI_OBJECT_LIST arg;
376 ACPI_OBJECT obj;
377 uint32_t cap[3];
378
379 arg.Count = 1;
380 arg.Pointer = &obj;
381
382 cap[0] = ACPICPU_PDC_REVID;
383 cap[1] = 1;
384 cap[2] = flags;
385
386 obj.Type = ACPI_TYPE_BUFFER;
387 obj.Buffer.Length = sizeof(cap);
388 obj.Buffer.Pointer = (void *)cap;
389
390 return AcpiEvaluateObject(sc->sc_node->ad_handle, "_PDC", &arg, NULL);
391 }
392
/*
 * Negotiate capabilities via _OSC: first with the query flag set,
 * then (on success) commit the request. On success the granted
 * capability word is stored in *val. Returns an ACPI status; the
 * caller treats AE_NOT_FOUND as "method absent", not an error.
 */
static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;	/* retry budget for the "capabilities masked" case */

	/*
	 * UUID passed as the first _OSC argument.
	 * NOTE(review): presumably the Intel processor-capabilities
	 * UUID -- verify against the vendor _OSC documentation.
	 */
	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	/* First pass is a query only; nothing is committed yet. */
	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	arg.Count = 4;
	arg.Pointer = obj;

	/* Arg 0: UUID identifying the capability group. */
	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	/* Arg 1: revision ID. */
	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	/* Arg 2: number of capability DWORDs in arg 3. */
	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	/* Arg 3: the capability buffer itself. */
	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	/* The reply must mirror the two-DWORD capability buffer. */
	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	/* DWORD 0 carries the firmware's error/status bits. */
	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 * _OSC with the Query Support Flag set until _OSC
	 * returns the Capabilities Masked bit clear, to
	 * negotiate the set of features to be granted to
	 * the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	/* Query succeeded: re-evaluate once more to commit. */
	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
502
503 static void
504 acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
505 {
506 ACPI_OSD_EXEC_CALLBACK func;
507 struct acpicpu_softc *sc;
508 device_t self = aux;
509
510 sc = device_private(self);
511
512 if (sc->sc_cold != false)
513 return;
514
515 switch (evt) {
516
517 case ACPICPU_C_NOTIFY:
518
519 if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
520 return;
521
522 func = acpicpu_cstate_callback;
523 break;
524
525 case ACPICPU_P_NOTIFY:
526
527 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
528 return;
529
530 func = acpicpu_pstate_callback;
531 break;
532
533 case ACPICPU_T_NOTIFY:
534
535 if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
536 return;
537
538 func = acpicpu_tstate_callback;
539 break;
540
541 default:
542 aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
543 return;
544 }
545
546 (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
547 }
548
549 static bool
550 acpicpu_suspend(device_t self, const pmf_qual_t *qual)
551 {
552 struct acpicpu_softc *sc = device_private(self);
553
554 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
555 (void)acpicpu_cstate_suspend(self);
556
557 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
558 (void)acpicpu_pstate_suspend(self);
559
560 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
561 (void)acpicpu_tstate_suspend(self);
562
563 sc->sc_cold = true;
564
565 return true;
566 }
567
568 static bool
569 acpicpu_resume(device_t self, const pmf_qual_t *qual)
570 {
571 struct acpicpu_softc *sc = device_private(self);
572
573 sc->sc_cold = false;
574
575 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
576 (void)acpicpu_cstate_resume(self);
577
578 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
579 (void)acpicpu_pstate_resume(self);
580
581 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
582 (void)acpicpu_tstate_resume(self);
583
584 return true;
585 }
586
#ifdef _MODULE

MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);
CFDRIVER_DECL(acpicpu, DV_DULL, NULL);

/* Locator template: a single wildcarded locator. */
static int acpicpuloc[] = { -1 };
extern struct cfattach acpicpu_ca;

/* Attach below any acpinodebus instance. */
static struct cfparent acpiparent = {
	"acpinodebus", NULL, DVUNIT_ANY
};

/* Statically constructed config data for the modular driver. */
static struct cfdata acpicpu_cfdata[] = {
	{
		.cf_name = "acpicpu",
		.cf_atname = "acpicpu",
		.cf_unit = 0,
		.cf_fstate = FSTATE_STAR,
		.cf_loc = acpicpuloc,
		.cf_flags = 0,
		.cf_pspec = &acpiparent,
	},

	{ NULL, NULL, 0, 0, NULL, 0, NULL }	/* terminator */
};
612
613 static int
614 acpicpu_modcmd(modcmd_t cmd, void *context)
615 {
616 int err;
617
618 switch (cmd) {
619
620 case MODULE_CMD_INIT:
621
622 err = config_cfdriver_attach(&acpicpu_cd);
623
624 if (err != 0)
625 return err;
626
627 err = config_cfattach_attach("acpicpu", &acpicpu_ca);
628
629 if (err != 0) {
630 config_cfdriver_detach(&acpicpu_cd);
631 return err;
632 }
633
634 err = config_cfdata_attach(acpicpu_cfdata, 1);
635
636 if (err != 0) {
637 config_cfattach_detach("acpicpu", &acpicpu_ca);
638 config_cfdriver_detach(&acpicpu_cd);
639 return err;
640 }
641
642 return 0;
643
644 case MODULE_CMD_FINI:
645
646 err = config_cfdata_detach(acpicpu_cfdata);
647
648 if (err != 0)
649 return err;
650
651 config_cfattach_detach("acpicpu", &acpicpu_ca);
652 config_cfdriver_detach(&acpicpu_cd);
653
654 return 0;
655
656 default:
657 return ENOTTY;
658 }
659 }
660
661 #endif /* _MODULE */
662