/*	$NetBSD: acpi_cpu_cstate.c,v 1.11 2010/07/27 05:11:33 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen (at) iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.11 2010/07/27 05:11:33 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/once.h>
#include <sys/timetc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>
#include <dev/acpi/acpi_timer.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_cstate")

static void		acpicpu_cstate_attach_print(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cstate_cst(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cstate_cst_add(struct acpicpu_softc *,
						ACPI_OBJECT *);
static void		acpicpu_cstate_cst_bios(void);
static ACPI_STATUS	acpicpu_cstate_csd(ACPI_HANDLE, struct acpicpu_dep *);
static void		acpicpu_cstate_fadt(struct acpicpu_softc *);
static void		acpicpu_cstate_quirks(struct acpicpu_softc *);
static int		acpicpu_cstate_quirks_piix4(struct pci_attach_args *);
static int		acpicpu_cstate_latency(struct acpicpu_softc *);
static bool		acpicpu_cstate_bm_check(void);
static void		acpicpu_cstate_idle_enter(struct acpicpu_softc *, int);

extern struct acpicpu_softc **acpicpu_sc;
extern int acpi_suspended;

/*
 * XXX: The local APIC timer (as well as the TSC) is typically
 *	stopped in C3. For now, we have no option but to disable
 *	C3 by default.
 */
#ifdef ACPICPU_ENABLE_C3
static int cs_state_max = ACPI_STATE_C3;
#else
static int cs_state_max = ACPI_STATE_C2;
#endif
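
/*
 * A minimal kernel configuration sketch for opting in to C3. The
 * option name matches the #ifdef above; whether a particular kernel
 * configuration actually wires it up is an assumption (illustrative
 * only):
 *
 *	options 	ACPICPU_ENABLE_C3	# allow entry to C3
 */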

void
acpicpu_cstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	ACPI_STATUS rv;

	/*
	 * Either use the preferred _CST or resort to FADT.
	 */
	rv = acpicpu_cstate_cst(sc);

	switch (rv) {

	case AE_OK:
		sc->sc_flags |= ACPICPU_FLAG_C_CST;
		acpicpu_cstate_cst_bios();
		break;

	default:
		sc->sc_flags |= ACPICPU_FLAG_C_FADT;
		acpicpu_cstate_fadt(sc);
		break;
	}

	acpicpu_cstate_quirks(sc);
	acpicpu_cstate_attach_print(sc);
}

static void
acpicpu_cstate_attach_print(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs;
	struct acpicpu_dep dep;
	const char *method;
	ACPI_STATUS rv;
	int i;

	(void)memset(&dep, 0, sizeof(struct acpicpu_dep));

	rv = acpicpu_cstate_csd(sc->sc_node->ad_handle, &dep);

	if (ACPI_SUCCESS(rv)) {
		aprint_debug_dev(sc->sc_dev, "C%u: _CSD, "
		    "domain 0x%02x / 0x%02x, type 0x%02x\n",
		    dep.dep_index, dep.dep_domain,
		    dep.dep_ncpu, dep.dep_coord);
	}

	aprint_debug_dev(sc->sc_dev, "Cx: %5s",
	    (sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0 ? "FADT" : "_CST");

	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		aprint_debug(", BM control");

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		aprint_debug(", BM arbitration");

	if ((sc->sc_flags & ACPICPU_FLAG_C_C1E) != 0)
		aprint_debug(", C1E");

	if ((sc->sc_flags & ACPICPU_FLAG_C_NOC3) != 0)
		aprint_debug(", C3 disabled (quirk)");

	aprint_debug("\n");

	for (i = 0; i < ACPI_C_STATE_COUNT; i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method == 0)
			continue;

		switch (cs->cs_method) {

		case ACPICPU_C_STATE_HALT:
			method = "HALT";
			break;

		case ACPICPU_C_STATE_FFH:
			method = "FFH";
			break;

		case ACPICPU_C_STATE_SYSIO:
			method = "SYSIO";
			break;

		default:
			panic("NOTREACHED");
		}

		aprint_debug_dev(sc->sc_dev, "C%d: %5s, "
		    "latency %4u, power %4u, addr 0x%06x, flags 0x%02x\n",
		    i, method, cs->cs_latency, cs->cs_power,
		    (uint32_t)cs->cs_addr, cs->cs_flags);
	}
}

int
acpicpu_cstate_detach(device_t self)
{
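	/*
	 * once(9) guards the machine-dependent hooks here and in
	 * acpicpu_cstate_start() below: the first RUN_ONCE() call
	 * runs the given function; later calls return the cached
	 * result instead of running it again.
	 */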
	static ONCE_DECL(once_detach);

	return RUN_ONCE(&once_detach, acpicpu_md_idle_stop);
}

int
acpicpu_cstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_start);
	static ONCE_DECL(once_save);
	int rv;

	/*
	 * Save the existing idle mechanism and claim the idle_loop(9).
	 * This should be called after all ACPI CPUs have been attached.
	 */
	rv = RUN_ONCE(&once_save, acpicpu_md_idle_init);

	if (rv != 0)
		return rv;

	rv = RUN_ONCE(&once_start, acpicpu_md_idle_start);

	if (rv == 0)
		sc->sc_flags |= ACPICPU_FLAG_C;

	return rv;
}

bool
acpicpu_cstate_suspend(device_t self)
{

	return true;
}

bool
acpicpu_cstate_resume(device_t self)
{
	static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_cstate_callback;
	struct acpicpu_softc *sc = device_private(self);

	KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_CST) != 0)
		(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);

	return true;
}

void
acpicpu_cstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0) {
		KASSERT((sc->sc_flags & ACPICPU_FLAG_C_CST) == 0);
		return;
	}

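	/*
	 * Re-evaluate _CST with the idle loop stopped. The set of
	 * available states may have changed behind our back; for
	 * instance, some systems move the SYSIO addresses when the
	 * AC adapter is connected or disconnected (cf. the comment
	 * in acpicpu_cstate_cst_add()).
	 */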
	(void)acpicpu_md_idle_stop();
	(void)acpicpu_cstate_cst(sc);
	(void)acpicpu_md_idle_start();
}

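/*
 * For reference, a sketch of the _CST object parsed below
 * (ACPI 4.0, section 8.4.2.1); the values shown are illustrative:
 *
 *	Name (_CST, Package () {
 *		2,			// number of entries that follow
 *		Package () {		// one package per C-state:
 *			Buffer () { ... },	// register (GAS)
 *			1,			// type (C1, ..., C3)
 *			1,			// worst-case latency, us
 *			1000			// average power, mW
 *		},
 *		...
 *	})
 */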
static ACPI_STATUS
acpicpu_cstate_cst(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i, n;
	uint8_t count;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count < 2) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = obj->Package.Elements;

	if (elm[0].Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm[0].Integer.Value;

	if (n != obj->Package.Count - 1) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	if (n > ACPI_C_STATES_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	(void)memset(sc->sc_cstate, 0,
	    sizeof(*sc->sc_cstate) * ACPI_C_STATE_COUNT);

	CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1);
	CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3);

	for (count = 0, i = 1; i <= n; i++) {

		elm = &obj->Package.Elements[i];
		rv = acpicpu_cstate_cst_add(sc, elm);

		if (ACPI_SUCCESS(rv))
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	if (ACPI_FAILURE(rv))
		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "_CST: %s\n", AcpiFormatException(rv));

	return rv;
}

static ACPI_STATUS
acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm)
{
	const struct acpicpu_object *ao = &sc->sc_object;
	struct acpicpu_cstate *cs = sc->sc_cstate;
	struct acpicpu_cstate state;
	struct acpicpu_reg *reg;
	ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT *obj;
	uint32_t type;

	(void)memset(&state, 0, sizeof(state));

	state.cs_flags = ACPICPU_FLAG_C_BM_STS;

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (elm->Package.Count != 4) {
		rv = AE_LIMIT;
		goto out;
	}

	/*
	 * Type.
	 */
	obj = &elm->Package.Elements[1];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	type = obj->Integer.Value;

	if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) {
		rv = AE_TYPE;
		goto out;
	}

	/*
	 * Latency.
	 */
	obj = &elm->Package.Elements[2];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_latency = obj->Integer.Value;

	/*
	 * Power.
	 */
	obj = &elm->Package.Elements[3];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_power = obj->Integer.Value;

	/*
	 * Register.
	 */
	obj = &elm->Package.Elements[0];

	if (obj->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	CTASSERT(sizeof(struct acpicpu_reg) == 15);
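
	/*
	 * A sketch of the buffer that the cast below assumes: a
	 * 15-byte Generic Register descriptor (ACPI 4.0, section
	 * 6.4.3.7). Only the four fields referenced in this file
	 * are known here; the remaining offsets follow the
	 * standard descriptor format:
	 *
	 *	offset 0	descriptor type (0x82)
	 *	offset 1	resource length (12, two bytes)
	 *	offset 3	reg_spaceid
	 *	offset 4	reg_bitwidth
	 *	offset 5	bit offset
	 *	offset 6	reg_accesssize
	 *	offset 7	reg_addr (64-bit address, packed)
	 */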

	if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) {
		rv = AE_LIMIT;
		goto out;
	}

	reg = (struct acpicpu_reg *)obj->Buffer.Pointer;

	switch (reg->reg_spaceid) {

	case ACPI_ADR_SPACE_SYSTEM_IO:
		state.cs_method = ACPICPU_C_STATE_SYSIO;

		if (reg->reg_addr == 0) {
			rv = AE_AML_ILLEGAL_ADDRESS;
			goto out;
		}

		if (reg->reg_bitwidth != 8) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		/*
		 * Check only that the address is in the mapped space.
		 * Systems are allowed to change it when operating
		 * with _CST (see ACPI 4.0, pp. 94-95). For instance,
		 * the offset of P_LVL3 may change depending on whether
		 * acpiacad(4) is connected or disconnected.
		 */
		if (reg->reg_addr > ao->ao_pblkaddr + ao->ao_pblklen) {
			rv = AE_BAD_ADDRESS;
			goto out;
		}

		state.cs_addr = reg->reg_addr;
		break;

	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		state.cs_method = ACPICPU_C_STATE_FFH;

		switch (type) {

		case ACPI_STATE_C1:

			if ((sc->sc_flags & ACPICPU_FLAG_C_MWAIT) == 0)
				state.cs_method = ACPICPU_C_STATE_HALT;

			break;

		default:

			if ((sc->sc_flags & ACPICPU_FLAG_C_MWAIT) == 0) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}
		}

		if (sc->sc_cap != 0) {

			/*
			 * The _CST FFH GAS encoding may contain
			 * additional hints on Intel processors.
			 * Use these to determine whether we can
			 * avoid the bus master activity check.
			 */
			if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0)
				state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
		}

		break;

	default:
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	if (cs[type].cs_method != 0) {
		rv = AE_ALREADY_EXISTS;
		goto out;
	}

	cs[type].cs_addr = state.cs_addr;
	cs[type].cs_power = state.cs_power;
	cs[type].cs_flags = state.cs_flags;
	cs[type].cs_method = state.cs_method;
	cs[type].cs_latency = state.cs_latency;

out:
	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(sc->sc_dev,
		    "invalid _CST: %s\n", AcpiFormatException(rv));

	return rv;
}

static void
acpicpu_cstate_cst_bios(void)
{
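	/*
	 * A sketch of what happens below: if the FADT advertises an
	 * SMI command port, writing the CST_CNT value to it is the
	 * handshake that tells the firmware the OS will use _CST and
	 * expects changes to be signalled via Notify (see ACPI 4.0,
	 * section 5.2.9).
	 */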
	const uint8_t val = AcpiGbl_FADT.CstControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

static ACPI_STATUS
acpicpu_cstate_csd(ACPI_HANDLE hdl, struct acpicpu_dep *dep)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	int i, n;

	/*
	 * Query the optional _CSD for heuristics.
	 */
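	/*
	 * A sketch of the expected entry, a package of six integers
	 * (ACPI 4.0, section 8.4.2.2); the values are illustrative:
	 *
	 *	Package () { 6, 0, 0, 0xFE, 2, 1 }
	 *
	 * i.e. the number of entries, a revision (zero), the domain,
	 * the coordination type (0xFE being HW_ALL), the number of
	 * processors in the domain, and the index of this C-state.
	 */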
	rv = acpi_eval_struct(hdl, "_CSD", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	n = obj->Package.Count;

	if (n != 6) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = obj->Package.Elements;

	for (i = 0; i < n; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER) {
			rv = AE_TYPE;
			goto out;
		}

		KDASSERT((uint64_t)elm[i].Integer.Value <= UINT32_MAX);
	}

	if (elm[0].Integer.Value != 6 || elm[1].Integer.Value != 0) {
		rv = AE_BAD_DATA;
		goto out;
	}

	dep->dep_domain = elm[2].Integer.Value;
	dep->dep_coord = elm[3].Integer.Value;
	dep->dep_ncpu = elm[4].Integer.Value;
	dep->dep_index = elm[5].Integer.Value;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static void
acpicpu_cstate_fadt(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;

	(void)memset(cs, 0, sizeof(*cs) * ACPI_C_STATE_COUNT);

	/*
	 * All x86 processors should support C1 (a.k.a. HALT).
	 */
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) != 0)
		cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;

	if ((acpicpu_md_cpus_running() > 1) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
		return;

	cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
	cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;

	cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency;
	cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency;

	cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4;
	cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5;
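
	/*
	 * The offsets above follow the P_BLK layout (ACPI 4.0,
	 * section 4.7.2.6): the 32-bit P_CNT occupies the first
	 * four bytes, followed by the one-byte P_LVL2 and P_LVL3
	 * registers at offsets 4 and 5, for six bytes in total.
	 */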

	/*
	 * The P_BLK length should always be 6. If it is
	 * not, reduce functionality accordingly. Also
	 * sanity check the latency levels from the FADT.
	 */
	if (sc->sc_object.ao_pblklen < 5)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (sc->sc_object.ao_pblklen < 6)
		cs[ACPI_STATE_C3].cs_method = 0;

	CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100);
	CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000);

	if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX)
		cs[ACPI_STATE_C3].cs_method = 0;
}

static void
acpicpu_cstate_quirks(struct acpicpu_softc *sc)
{
	const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
	const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;
	struct pci_attach_args pa;

	/*
	 * Check bus master arbitration. If ARB_DIS
	 * is not available, processor caches must be
	 * flushed before C3 (ACPI 4.0, section 8.2).
	 */
	if (reg != 0 && len != 0)
		sc->sc_flags |= ACPICPU_FLAG_C_ARB;
	else {
		/*
		 * Disable C3 entirely if WBINVD is not present.
		 */
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
			sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
		else {
			/*
			 * If WBINVD is present and functioning properly,
			 * flush all processor caches before entering C3.
			 */
			if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
				sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
			else
				sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
		}
	}

	/*
	 * There are several errata for the PIIX4.
	 */
	if (pci_find_device(&pa, acpicpu_cstate_quirks_piix4) != 0)
		sc->sc_flags |= ACPICPU_FLAG_C_NOC3;

	if ((sc->sc_flags & ACPICPU_FLAG_C_NOC3) != 0)
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
}

static int
acpicpu_cstate_quirks_piix4(struct pci_attach_args *pa)
{

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
		return 0;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82371AB_ISA ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82440MX_PMC)
		return 1;

	return 0;
}

static int
acpicpu_cstate_latency(struct acpicpu_softc *sc)
{
	static const uint32_t cs_factor = 3;
	struct acpicpu_cstate *cs;
	int i;

	for (i = cs_state_max; i > 0; i--) {

		cs = &sc->sc_cstate[i];

		if (__predict_false(cs->cs_method == 0))
			continue;

		/*
		 * Choose a state if we have previously slept
		 * longer than the worst case latency of the
		 * state times an arbitrary multiplier.
		 */
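		/*
		 * For example, with cs_factor 3 and the maximum
		 * C2 latency of 100 us, C2 is only entered once
		 * the previous sleep exceeded 300 us.
		 */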
		if (sc->sc_sleep > cs->cs_latency * cs_factor)
			return i;
	}

	return ACPI_STATE_C1;
}

/*
 * The main idle loop.
 */
void
acpicpu_cstate_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	int state;

	acpi_md_OsDisableInterrupt();

	KASSERT(acpicpu_sc != NULL);
	KASSERT(ci->ci_cpuid < maxcpus);
	KASSERT(ci->ci_ilevel == IPL_NONE);

	sc = acpicpu_sc[ci->ci_cpuid];

	/*
	 * If not all CPUs have an ACPI counterpart, the softc may
	 * be NULL. In that case fall back to plain C1 with HALT.
	 */
	if (__predict_false(sc == NULL)) {
		acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
		return;
	}

	if (__predict_false(acpi_suspended != 0)) {
		acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
		return;
	}

	state = acpicpu_cstate_latency(sc);

	/*
	 * Check for bus master activity. Note that usb(4) in
	 * particular causes high activity, which may prevent
	 * the use of C3 states.
	 */
	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

		if (acpicpu_cstate_bm_check() != false)
			state--;

		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
			state = ACPI_STATE_C1;
	}

	KASSERT(state != ACPI_STATE_C0);

	if (state != ACPI_STATE_C3) {
		acpicpu_cstate_idle_enter(sc, state);
		return;
	}

	/*
	 * On all recent (Intel) CPUs the caches are shared
	 * between CPUs, and bus master control is required to
	 * keep them coherent while in C3. Flushing the CPU
	 * caches is only a last resort.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
		ACPI_FLUSH_CPU_CACHE();

	/*
	 * Allow the bus master to request that any given
	 * CPU should return immediately to C0 from C3.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

	/*
	 * It may be necessary to disable bus master arbitration
	 * to ensure that bus master cycles do not occur while
	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

	acpicpu_cstate_idle_enter(sc, state);

	/*
	 * Disable bus master wake and re-enable the arbiter.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
}

static void
acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
{
	struct acpicpu_cstate *cs = &sc->sc_cstate[state];
	uint32_t end, start, val;

	start = acpitimer_read_safe(NULL);

	switch (cs->cs_method) {

	case ACPICPU_C_STATE_FFH:
	case ACPICPU_C_STATE_HALT:
		acpicpu_md_idle_enter(cs->cs_method, state);
		break;

	case ACPICPU_C_STATE_SYSIO:
		(void)AcpiOsReadPort(cs->cs_addr, &val, 8);
		break;

	default:
		acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
		break;
	}

	cs->cs_stat++;

	end = acpitimer_read_safe(NULL);
	sc->sc_sleep = hztoms(acpitimer_delta(end, start)) * 1000;

	acpi_md_OsEnableInterrupt();
}

static bool
acpicpu_cstate_bm_check(void)
{
	uint32_t val = 0;
	ACPI_STATUS rv;

	rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val);

	if (ACPI_FAILURE(rv) || val == 0)
		return false;

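	/*
	 * BM_STS is a sticky status bit: following the usual
	 * ACPI convention, it is cleared by writing a one.
	 */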
	(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);

	return true;
}