/* $NetBSD: acpi_cpu_cstate.c,v 1.17 2010/08/09 05:00:24 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.17 2010/08/09 05:00:24 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/timetc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>
#include <dev/acpi/acpi_timer.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT        ACPI_BUS_COMPONENT
ACPI_MODULE_NAME          ("acpi_cpu_cstate")

static void             acpicpu_cstate_attach_print(struct acpicpu_softc *);
static ACPI_STATUS      acpicpu_cstate_cst(struct acpicpu_softc *);
static ACPI_STATUS      acpicpu_cstate_cst_add(struct acpicpu_softc *,
                            ACPI_OBJECT *);
static void             acpicpu_cstate_cst_bios(void);
static void             acpicpu_cstate_fadt(struct acpicpu_softc *);
static void             acpicpu_cstate_quirks(struct acpicpu_softc *);
static int              acpicpu_cstate_quirks_piix4(struct pci_attach_args *);
static int              acpicpu_cstate_latency(struct acpicpu_softc *);
static bool             acpicpu_cstate_bm_check(void);
static void             acpicpu_cstate_idle_enter(struct acpicpu_softc *, int);

extern struct acpicpu_softc **acpicpu_sc;

/*
 * XXX: The local APIC timer (as well as the TSC) is typically stopped
 *	in C3, so for now we have no choice but to disable C3. There
 *	also appear to be timer-related interrupt issues in C2. The
 *	only entirely safe option at the moment is C1.
 */
#ifdef ACPICPU_ENABLE_C3
static int cs_state_max = ACPI_STATE_C3;
#else
static int cs_state_max = ACPI_STATE_C1;
#endif

void
acpicpu_cstate_attach(device_t self)
{
        struct acpicpu_softc *sc = device_private(self);
        ACPI_STATUS rv;

        /*
         * Either use the preferred _CST or resort to FADT.
         */
        rv = acpicpu_cstate_cst(sc);

        switch (rv) {

        case AE_OK:
                sc->sc_flags |= ACPICPU_FLAG_C_CST;
                acpicpu_cstate_cst_bios();
                break;

        default:
                sc->sc_flags |= ACPICPU_FLAG_C_FADT;
                acpicpu_cstate_fadt(sc);
                break;
        }

        acpicpu_cstate_quirks(sc);
        acpicpu_cstate_attach_print(sc);
}

void
acpicpu_cstate_attach_print(struct acpicpu_softc *sc)
{
        struct acpicpu_cstate *cs;
        const char *str;
        int i;

        for (i = 0; i < ACPI_C_STATE_COUNT; i++) {

                cs = &sc->sc_cstate[i];

                if (cs->cs_method == 0)
                        continue;

                switch (cs->cs_method) {

                case ACPICPU_C_STATE_HALT:
                        str = "HALT";
                        break;

                case ACPICPU_C_STATE_FFH:
                        str = "FFH";
                        break;

                case ACPICPU_C_STATE_SYSIO:
                        str = "SYSIO";
                        break;

                default:
                        panic("NOTREACHED");
                }

                aprint_debug_dev(sc->sc_dev, "C%d: %5s, "
                    "lat %3u us, pow %5u mW, addr 0x%06x, flags 0x%02x\n",
                    i, str, cs->cs_latency, cs->cs_power,
                    (uint32_t)cs->cs_addr, cs->cs_flags);
        }
}

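/*
 * Detach the C-state functionality. The RUN_ONCE(9) guard makes
 * the idle-loop takedown a machine-wide, one-time operation, even
 * though every CPU calls this.
 */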
int
acpicpu_cstate_detach(device_t self)
{
        struct acpicpu_softc *sc = device_private(self);
        static ONCE_DECL(once_detach);
        int rv;

        rv = RUN_ONCE(&once_detach, acpicpu_md_idle_stop);

        if (rv != 0)
                return rv;

        sc->sc_flags &= ~ACPICPU_FLAG_C;

        return 0;
}

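/*
 * Start the C-state functionality. Both machine-dependent calls
 * below are guarded by RUN_ONCE(9), and hence execute only once
 * for the whole machine, regardless of the number of CPUs.
 */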
int
acpicpu_cstate_start(device_t self)
{
        struct acpicpu_softc *sc = device_private(self);
        static ONCE_DECL(once_start);
        static ONCE_DECL(once_save);
        int rv;

        /*
         * Save the existing idle-mechanism and claim the idle_loop(9).
         * This should be called after all ACPI CPUs have been attached.
         */
        rv = RUN_ONCE(&once_save, acpicpu_md_idle_init);

        if (rv != 0)
                return rv;

        rv = RUN_ONCE(&once_start, acpicpu_md_idle_start);

        if (rv == 0)
                sc->sc_flags |= ACPICPU_FLAG_C;

        return rv;
}

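/*
 * The pmf(9) suspend and resume hooks. Upon resume the _CST data
 * is re-evaluated asynchronously, as the firmware may have changed
 * the available C-states across the sleep.
 */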
bool
acpicpu_cstate_suspend(device_t self)
{

        return true;
}

bool
acpicpu_cstate_resume(device_t self)
{
        static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_cstate_callback;
        struct acpicpu_softc *sc = device_private(self);

        if ((sc->sc_flags & ACPICPU_FLAG_C_CST) != 0)
                (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);

        return true;
}

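/*
 * Re-evaluate _CST. Executed asynchronously via AcpiOsExecute(),
 * both after resume and upon notifications from the firmware. A
 * no-op if the C-states were obtained from the FADT.
 */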
void
acpicpu_cstate_callback(void *aux)
{
        struct acpicpu_softc *sc;
        device_t self = aux;

        sc = device_private(self);

        if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0) {
                KASSERT((sc->sc_flags & ACPICPU_FLAG_C_CST) == 0);
                return;
        }

        mutex_enter(&sc->sc_mtx);
        (void)acpicpu_cstate_cst(sc);
        mutex_exit(&sc->sc_mtx);
}

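/*
 * Evaluate the _CST package (see ACPI 4.0, section 8.4.2.1). In
 * ASL a typical _CST looks roughly like:
 *
 *	Name (_CST, Package () {
 *		2,					// entry count
 *		Package () { Register, 1,  1, 1000 },	// C1
 *		Package () { Register, 2, 90,  500 }	// C2
 *	})
 *
 * The first element declares the number of sub-packages, and each
 * sub-package is of the form { register, type, latency, power }.
 */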
static ACPI_STATUS
acpicpu_cstate_cst(struct acpicpu_softc *sc)
{
        ACPI_OBJECT *elm, *obj;
        ACPI_BUFFER buf;
        ACPI_STATUS rv;
        uint32_t i, n;
        uint8_t count;

        rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);

        if (ACPI_FAILURE(rv))
                return rv;

        obj = buf.Pointer;

        if (obj->Type != ACPI_TYPE_PACKAGE) {
                rv = AE_TYPE;
                goto out;
        }

        if (obj->Package.Count < 2) {
                rv = AE_LIMIT;
                goto out;
        }

        elm = obj->Package.Elements;

        if (elm[0].Type != ACPI_TYPE_INTEGER) {
                rv = AE_TYPE;
                goto out;
        }

        n = elm[0].Integer.Value;

        if (n != obj->Package.Count - 1) {
                rv = AE_BAD_VALUE;
                goto out;
        }

        if (n > ACPI_C_STATES_MAX) {
                rv = AE_LIMIT;
                goto out;
        }

        (void)memset(sc->sc_cstate, 0,
            sizeof(*sc->sc_cstate) * ACPI_C_STATE_COUNT);

        CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1);
        CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3);

        for (count = 0, i = 1; i <= n; i++) {

                elm = &obj->Package.Elements[i];
                rv = acpicpu_cstate_cst_add(sc, elm);

                if (ACPI_SUCCESS(rv))
                        count++;
        }

        rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
        if (buf.Pointer != NULL)
                ACPI_FREE(buf.Pointer);

        return rv;
}

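/*
 * Add a single _CST entry. Each element is expected to be a
 * four-element package of the form
 *
 *	{ Register (buffer), Type (1 - 3), Latency (us), Power (mW) },
 *
 * where the register buffer is a generic address structure that
 * determines the entry method (FFH or SYSIO).
 */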
static ACPI_STATUS
acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm)
{
        const struct acpicpu_object *ao = &sc->sc_object;
        struct acpicpu_cstate *cs = sc->sc_cstate;
        struct acpicpu_cstate state;
        struct acpicpu_reg *reg;
        ACPI_STATUS rv = AE_OK;
        ACPI_OBJECT *obj;
        uint32_t type;

        (void)memset(&state, 0, sizeof(state));

        state.cs_flags = ACPICPU_FLAG_C_BM_STS;

        if (elm->Type != ACPI_TYPE_PACKAGE) {
                rv = AE_TYPE;
                goto out;
        }

        if (elm->Package.Count != 4) {
                rv = AE_LIMIT;
                goto out;
        }

        /*
         * Type.
         */
        obj = &elm->Package.Elements[1];

        if (obj->Type != ACPI_TYPE_INTEGER) {
                rv = AE_TYPE;
                goto out;
        }

        type = obj->Integer.Value;

        if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) {
                rv = AE_TYPE;
                goto out;
        }

        /*
         * Latency.
         */
        obj = &elm->Package.Elements[2];

        if (obj->Type != ACPI_TYPE_INTEGER) {
                rv = AE_TYPE;
                goto out;
        }

        state.cs_latency = obj->Integer.Value;

        /*
         * Power.
         */
        obj = &elm->Package.Elements[3];

        if (obj->Type != ACPI_TYPE_INTEGER) {
                rv = AE_TYPE;
                goto out;
        }

        state.cs_power = obj->Integer.Value;

        /*
         * Register.
         */
        obj = &elm->Package.Elements[0];

        if (obj->Type != ACPI_TYPE_BUFFER) {
                rv = AE_TYPE;
                goto out;
        }

        CTASSERT(sizeof(struct acpicpu_reg) == 15);

        if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) {
                rv = AE_LIMIT;
                goto out;
        }

        reg = (struct acpicpu_reg *)obj->Buffer.Pointer;

        switch (reg->reg_spaceid) {

        case ACPI_ADR_SPACE_SYSTEM_IO:
                state.cs_method = ACPICPU_C_STATE_SYSIO;

                if (reg->reg_addr == 0) {
                        rv = AE_AML_ILLEGAL_ADDRESS;
                        goto out;
                }

                if (reg->reg_bitwidth != 8) {
                        rv = AE_AML_BAD_RESOURCE_LENGTH;
                        goto out;
                }

                /*
                 * Check only that the address is in the mapped space.
                 * Systems are allowed to change it when operating
                 * with _CST (see ACPI 4.0, pp. 94-95). For instance,
                 * the offset of P_LVL3 may change depending on whether
                 * acpiacad(4) is connected or disconnected.
                 */
                if (reg->reg_addr > ao->ao_pblkaddr + ao->ao_pblklen) {
                        rv = AE_BAD_ADDRESS;
                        goto out;
                }

                state.cs_addr = reg->reg_addr;
                break;

        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                state.cs_method = ACPICPU_C_STATE_FFH;

                switch (type) {

                case ACPI_STATE_C1:

                        if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0)
                                state.cs_method = ACPICPU_C_STATE_HALT;

                        break;

                default:

                        if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) {
                                rv = AE_SUPPORT;
                                goto out;
                        }
                }

                if (sc->sc_cap != 0) {

                        /*
                         * The _CST FFH GAS encoding may contain
                         * additional hints on Intel processors.
                         * Use these to determine whether we can
                         * avoid the bus master activity check.
                         */
                        if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0)
                                state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
                }

                break;

        default:
                rv = AE_AML_INVALID_SPACE_ID;
                goto out;
        }

        if (cs[type].cs_method != 0) {
                rv = AE_ALREADY_EXISTS;
                goto out;
        }

        cs[type].cs_addr = state.cs_addr;
        cs[type].cs_power = state.cs_power;
        cs[type].cs_flags = state.cs_flags;
        cs[type].cs_method = state.cs_method;
        cs[type].cs_latency = state.cs_latency;

out:
        if (ACPI_FAILURE(rv))
                aprint_debug_dev(sc->sc_dev, "invalid "
                    "_CST: %s\n", AcpiFormatException(rv));

        return rv;
}

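/*
 * Inform the BIOS that the operating system will use _CST. This is
 * done by writing the value of the FADT CST_CNT field to the SMI
 * command port (see ACPI 4.0, section 5.2.9).
 */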
static void
acpicpu_cstate_cst_bios(void)
{
        const uint8_t val = AcpiGbl_FADT.CstControl;
        const uint32_t addr = AcpiGbl_FADT.SmiCommand;

        if (addr == 0)
                return;

        (void)AcpiOsWritePort(addr, val, 8);
}

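/*
 * Fall back to the FADT. With this method C2 and C3 are entered by
 * reading the P_LVL2 and P_LVL3 registers, located at offsets four
 * and five from the base of the processor control block (P_BLK).
 */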
static void
acpicpu_cstate_fadt(struct acpicpu_softc *sc)
{
        struct acpicpu_cstate *cs = sc->sc_cstate;

        (void)memset(cs, 0, sizeof(*cs) * ACPI_C_STATE_COUNT);

        /*
         * All x86 processors should support C1 (a.k.a. HALT).
         */
        if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) != 0)
                cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;

        if ((acpicpu_md_cpus_running() > 1) &&
            (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
                return;

        cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
        cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;

        cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency;
        cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency;

        cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4;
        cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5;

        /*
         * The P_BLK length should always be 6. If it is not,
         * reduce functionality accordingly. Also sanity check
         * the latency values reported by the FADT.
         */
        if (sc->sc_object.ao_pblklen < 5)
                cs[ACPI_STATE_C2].cs_method = 0;

        if (sc->sc_object.ao_pblklen < 6)
                cs[ACPI_STATE_C3].cs_method = 0;

        CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100);
        CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000);

        if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX)
                cs[ACPI_STATE_C2].cs_method = 0;

        if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX)
                cs[ACPI_STATE_C3].cs_method = 0;
}

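/*
 * Apply known quirks and limitations, mostly related to cache
 * coherency and bus master arbitration during C3.
 */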
static void
acpicpu_cstate_quirks(struct acpicpu_softc *sc)
{
        const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
        const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;
        struct pci_attach_args pa;

        /*
         * Check bus master arbitration. If ARB_DIS
         * is not available, processor caches must be
         * flushed before C3 (ACPI 4.0, section 8.2).
         */
        if (reg != 0 && len != 0)
                sc->sc_flags |= ACPICPU_FLAG_C_ARB;
        else {
                /*
                 * Disable C3 entirely if WBINVD is not present.
                 */
                if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
                        sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
                else {
                        /*
                         * If WBINVD is present and functioning properly,
                         * flush all processor caches before entering C3.
                         */
                        if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
                                sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
                        else
                                sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
                }
        }

        /*
         * There are several errata for the PIIX4.
         */
        if (pci_find_device(&pa, acpicpu_cstate_quirks_piix4) != 0)
                sc->sc_flags |= ACPICPU_FLAG_C_NOC3;

        if ((sc->sc_flags & ACPICPU_FLAG_C_NOC3) != 0)
                sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
}

static int
acpicpu_cstate_quirks_piix4(struct pci_attach_args *pa)
{

        if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
                return 0;

        if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82371AB_ISA ||
            PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82440MX_PMC)
                return 1;

        return 0;
}

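/*
 * Pick the deepest available C-state whose worst-case latency is
 * justified by the length of the previous sleep. For example, with
 * the multiplier of three, a C3 state with a latency of 100 us is
 * chosen only if the previous sleep lasted longer than 300 us.
 */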
static int
acpicpu_cstate_latency(struct acpicpu_softc *sc)
{
        static const uint32_t cs_factor = 3;
        struct acpicpu_cstate *cs;
        int i;

        for (i = cs_state_max; i > 0; i--) {

                cs = &sc->sc_cstate[i];

                if (__predict_false(cs->cs_method == 0))
                        continue;

                /*
                 * Choose a state if we have previously slept
                 * longer than the worst case latency of the
                 * state times an arbitrary multiplier.
                 */
                if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor)
                        return i;
        }

        return ACPI_STATE_C1;
}

/*
 * The main idle loop.
 */
void
acpicpu_cstate_idle(void)
{
        struct cpu_info *ci = curcpu();
        struct acpicpu_softc *sc;
        int state;

        if (__predict_false(ci->ci_want_resched != 0))
                return;

        acpi_md_OsDisableInterrupt();

        KASSERT(acpicpu_sc != NULL);
        KASSERT(ci->ci_acpiid < maxcpus);
        KASSERT(ci->ci_ilevel == IPL_NONE);

        sc = acpicpu_sc[ci->ci_acpiid];

        if (__predict_false(sc == NULL))
                goto halt;

        if (__predict_false(sc->sc_cold != false))
                goto halt;

        if (__predict_false((sc->sc_flags & ACPICPU_FLAG_C) == 0))
                goto halt;

        if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
                goto halt;

        mutex_exit(&sc->sc_mtx);
        state = acpicpu_cstate_latency(sc);

        /*
         * Check for bus master activity. Note that particularly usb(4)
         * causes high activity, which may prevent the use of C3 states.
         */
        if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

                if (acpicpu_cstate_bm_check() != false)
                        state--;

                if (__predict_false(sc->sc_cstate[state].cs_method == 0))
                        state = ACPI_STATE_C1;
        }

        KASSERT(state != ACPI_STATE_C0);

        if (state != ACPI_STATE_C3) {
                acpicpu_cstate_idle_enter(sc, state);
                return;
        }

        /*
         * On all recent (Intel) CPUs caches are shared
         * by CPUs and bus master control is required to
         * keep these coherent while in C3. Flushing the
         * CPU caches is only the last resort.
         */
        if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
                ACPI_FLUSH_CPU_CACHE();

        /*
         * Allow the bus master to request that any given
         * CPU should return immediately to C0 from C3.
         */
        if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
                (void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

        /*
         * It may be necessary to disable bus master arbitration
         * to ensure that bus master cycles do not occur while
         * sleeping in C3 (see ACPI 4.0, section 8.1.4).
         */
        if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
                (void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

        acpicpu_cstate_idle_enter(sc, state);

        /*
         * Disable bus master wake and re-enable the arbiter.
         */
        if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
                (void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

        if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
                (void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

        return;

halt:
        acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
}

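/*
 * Enter a C-state. With the SYSIO method the mere read of the
 * P_LVLx register triggers the transition; the value read is
 * discarded. The sleep period is estimated by bracketing the
 * entry with ACPI timer reads, and interrupts are re-enabled
 * on the way out.
 */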
static void
acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
{
        struct acpicpu_cstate *cs = &sc->sc_cstate[state];
        uint32_t end, start, val;

        start = acpitimer_read_safe(NULL);

        switch (cs->cs_method) {

        case ACPICPU_C_STATE_FFH:
        case ACPICPU_C_STATE_HALT:
                acpicpu_md_idle_enter(cs->cs_method, state);
                break;

        case ACPICPU_C_STATE_SYSIO:
                (void)AcpiOsReadPort(cs->cs_addr, &val, 8);
                break;

        default:
                acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
                break;
        }

        cs->cs_stat++;

        end = acpitimer_read_safe(NULL);
        sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000;

        acpi_md_OsEnableInterrupt();
}

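/*
 * Check whether the bus master status bit (BM_STS) has been set
 * since the last call, indicating bus master activity. The bit is
 * "write one to clear"; hence the write-back after a positive
 * reading.
 */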
static bool
acpicpu_cstate_bm_check(void)
{
        uint32_t val = 0;
        ACPI_STATUS rv;

        rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val);

        if (ACPI_FAILURE(rv) || val == 0)
                return false;

        (void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);

        return true;
}