/* $NetBSD: acpi_cpu_cstate.c,v 1.49 2011/03/01 05:57:04 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen (at) iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.49 2011/03/01 05:57:04 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/timetc.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>
#include <dev/acpi/acpi_timer.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT		ACPI_BUS_COMPONENT
ACPI_MODULE_NAME		("acpi_cpu_cstate")

static ACPI_STATUS	acpicpu_cstate_cst(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cstate_cst_add(struct acpicpu_softc *,
			    ACPI_OBJECT *, int);
static void		acpicpu_cstate_cst_bios(void);
static void		acpicpu_cstate_memset(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cstate_dep(struct acpicpu_softc *);
static void		acpicpu_cstate_fadt(struct acpicpu_softc *);
static void		acpicpu_cstate_quirks(struct acpicpu_softc *);
static int		acpicpu_cstate_latency(struct acpicpu_softc *);
static bool		acpicpu_cstate_bm_check(void);
static void		acpicpu_cstate_idle_enter(struct acpicpu_softc *, int);

extern struct acpicpu_softc **acpicpu_sc;

/*
 * XXX: The local APIC timer (as well as TSC) is typically stopped in C3.
 *	For now, we have no option but to disable C3. There also appear
 *	to be timer-related interrupt issues in C2. The only entirely
 *	safe option at the moment is C1.
 */
#ifdef ACPICPU_ENABLE_C3
static int cs_state_max = ACPI_STATE_C3;
#else
static int cs_state_max = ACPI_STATE_C1;
#endif

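/*
 * Attach C-state support for a processor: parse the C-states either
 * from the preferred _CST object or from the FADT, query the optional
 * _CSD dependencies, and apply vendor and chipset specific quirks.
 */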
void
acpicpu_cstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	ACPI_STATUS rv;

	/*
	 * Either use the preferred _CST or resort to FADT.
	 */
	rv = acpicpu_cstate_cst(sc);

	switch (rv) {

	case AE_OK:
		acpicpu_cstate_cst_bios();
		break;

	default:
		sc->sc_flags |= ACPICPU_FLAG_C_FADT;
		acpicpu_cstate_fadt(sc);
		break;
	}

	/*
	 * Query the optional _CSD.
	 */
	rv = acpicpu_cstate_dep(sc);

	if (ACPI_SUCCESS(rv))
		sc->sc_flags |= ACPICPU_FLAG_C_DEP;

	sc->sc_flags |= ACPICPU_FLAG_C;

	acpicpu_cstate_quirks(sc);
}

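/*
 * Detach C-state support. Note that acpicpu_md_cstate_stop() is
 * executed only once, regardless of how many processors detach.
 */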
int
acpicpu_cstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	int rv;

	rv = RUN_ONCE(&once_detach, acpicpu_md_cstate_stop);

	if (rv != 0)
		return rv;

	sc->sc_flags &= ~ACPICPU_FLAG_C;

	return 0;
}

void
acpicpu_cstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	(void)acpicpu_md_cstate_start(sc);
}

bool
acpicpu_cstate_suspend(device_t self)
{
	return true;
}

bool
acpicpu_cstate_resume(device_t self)
{
	static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_cstate_callback;
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) == 0)
		(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);

	return true;
}

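/*
 * Re-evaluate _CST; scheduled asynchronously, for instance from
 * acpicpu_cstate_resume(). Nothing is done if the C-states were
 * derived from the FADT.
 */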
void
acpicpu_cstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0)
		return;

	mutex_enter(&sc->sc_mtx);
	(void)acpicpu_cstate_cst(sc);
	mutex_exit(&sc->sc_mtx);
}

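/*
 * Evaluate the _CST object. The package is expected to contain the
 * number of C-states followed by one sub-package per C-state; each
 * valid entry is added to sc_cstate via acpicpu_cstate_cst_add().
 */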
static ACPI_STATUS
acpicpu_cstate_cst(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i, n;
	uint8_t count;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count < 2) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = obj->Package.Elements;

	if (elm[0].Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm[0].Integer.Value;

	if (n != obj->Package.Count - 1) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	if (n > ACPI_C_STATES_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	acpicpu_cstate_memset(sc);

	CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1);
	CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3);

	for (count = 0, i = 1; i <= n; i++) {

		elm = &obj->Package.Elements[i];
		rv = acpicpu_cstate_cst_add(sc, elm, i);

		if (ACPI_SUCCESS(rv))
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

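/*
 * Parse a single _CST entry. Each entry is a package of four elements:
 * the register (a buffer containing a generic address structure), the
 * C-state type, the worst-case latency, and the average power. The
 * index i, not the type advertised by the firmware, determines the
 * slot in the sc_cstate array.
 */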
static ACPI_STATUS
acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm, int i)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;
	struct acpicpu_cstate state;
	struct acpicpu_reg *reg;
	ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT *obj;
	uint32_t type;

	(void)memset(&state, 0, sizeof(*cs));

	state.cs_flags = ACPICPU_FLAG_C_BM_STS;

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (elm->Package.Count != 4) {
		rv = AE_LIMIT;
		goto out;
	}

	/*
	 * Type.
	 */
	obj = &elm->Package.Elements[1];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	type = obj->Integer.Value;

	if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) {
		rv = AE_TYPE;
		goto out;
	}

	/*
	 * Latency.
	 */
	obj = &elm->Package.Elements[2];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_latency = obj->Integer.Value;

	/*
	 * Power.
	 */
	obj = &elm->Package.Elements[3];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_power = obj->Integer.Value;

	/*
	 * Register.
	 */
	obj = &elm->Package.Elements[0];

	if (obj->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	CTASSERT(sizeof(struct acpicpu_reg) == 15);

	if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) {
		rv = AE_LIMIT;
		goto out;
	}

	reg = (struct acpicpu_reg *)obj->Buffer.Pointer;

	switch (reg->reg_spaceid) {

	case ACPI_ADR_SPACE_SYSTEM_IO:
		state.cs_method = ACPICPU_C_STATE_SYSIO;

		if (reg->reg_addr == 0) {
			rv = AE_AML_ILLEGAL_ADDRESS;
			goto out;
		}

		if (reg->reg_bitwidth != 8) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		state.cs_addr = reg->reg_addr;
		break;

	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		state.cs_method = ACPICPU_C_STATE_FFH;

		switch (type) {

		case ACPI_STATE_C1:

			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0)
				state.cs_method = ACPICPU_C_STATE_HALT;

			break;

		default:

			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}
		}

		if (sc->sc_cap != 0) {

			/*
			 * The _CST FFH GAS encoding may contain
			 * additional hints on Intel processors.
			 * Use these to determine whether we can
			 * avoid the bus master activity check.
			 */
			if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0)
				state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
		}

		break;

	default:
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	/*
	 * As some systems define the type arbitrarily,
	 * we use a sequential counter instead of the
	 * BIOS data. For instance, AMD family 14h is
	 * instructed to only use the value 2; see
	 *
	 *	Advanced Micro Devices: BIOS and Kernel
	 *	Developer's Guide (BKDG) for AMD Family
	 *	14h Models 00h-0Fh Processors. Revision
	 *	3.00, January 4, 2011.
	 */
	if (i != (int)type) {

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "C%d != C%u from BIOS", i, type));
	}

	KASSERT(cs[i].cs_method == 0);

	cs[i].cs_addr = state.cs_addr;
	cs[i].cs_power = state.cs_power;
	cs[i].cs_flags = state.cs_flags;
	cs[i].cs_method = state.cs_method;
	cs[i].cs_latency = state.cs_latency;

out:
	if (ACPI_FAILURE(rv))
		aprint_error_dev(sc->sc_dev, "failed to add "
		    "C-state: %s\n", AcpiFormatException(rv));

	return rv;
}

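/*
 * Notify the firmware that we will use _CST by writing the CST_CNT
 * value from the FADT to the SMI command port.
 */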
static void
acpicpu_cstate_cst_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.CstControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

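/*
 * Reset all C-state slots to a known, empty state.
 */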
static void
acpicpu_cstate_memset(struct acpicpu_softc *sc)
{
	uint8_t i = 0;

	while (i < __arraycount(sc->sc_cstate)) {

		sc->sc_cstate[i].cs_addr = 0;
		sc->sc_cstate[i].cs_power = 0;
		sc->sc_cstate[i].cs_flags = 0;
		sc->sc_cstate[i].cs_method = 0;
		sc->sc_cstate[i].cs_latency = 0;

		i++;
	}
}

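/*
 * Evaluate the optional _CSD object, which describes C-state
 * dependencies between processors: the dependency domain, the
 * coordination type, and the number of processors in the domain.
 */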
static ACPI_STATUS
acpicpu_cstate_dep(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t val;
	uint8_t i, n;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CSD", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 1) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = &obj->Package.Elements[0];

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm->Package.Count;

	if (n != 6) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = elm->Package.Elements;

	for (i = 0; i < n; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER) {
			rv = AE_TYPE;
			goto out;
		}

		if (elm[i].Integer.Value > UINT32_MAX) {
			rv = AE_AML_NUMERIC_OVERFLOW;
			goto out;
		}
	}

	val = elm[1].Integer.Value;

	if (val != 0)
		aprint_debug_dev(sc->sc_dev, "invalid revision in _CSD\n");

	val = elm[3].Integer.Value;

	if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) {
		rv = AE_AML_BAD_RESOURCE_VALUE;
		goto out;
	}

	val = elm[4].Integer.Value;

	if (val > sc->sc_ncpus) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	sc->sc_cstate_dep.dep_domain = elm[2].Integer.Value;
	sc->sc_cstate_dep.dep_type = elm[3].Integer.Value;
	sc->sc_cstate_dep.dep_ncpus = elm[4].Integer.Value;
	sc->sc_cstate_dep.dep_index = elm[5].Integer.Value;

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_debug_dev(sc->sc_dev, "failed to evaluate "
		    "_CSD: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

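/*
 * Fill in the C-states from the FADT and the processor's P_BLK when
 * _CST is not available. Only C1 is guaranteed; C2 and C3 are used
 * only if the P_BLK is present and the FADT latencies are sane.
 */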
static void
acpicpu_cstate_fadt(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;

	acpicpu_cstate_memset(sc);

	/*
	 * All x86 processors should support C1 (a.k.a. HALT).
	 */
	cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;

	if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) == 0)
		aprint_debug_dev(sc->sc_dev, "HALT not supported?\n");

	if (sc->sc_object.ao_pblkaddr == 0)
		return;

	if (sc->sc_ncpus > 1) {

		if ((AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
			return;
	}

	cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
	cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;

	cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency;
	cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency;

	cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4;
	cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5;

	/*
	 * The P_BLK length should always be 6. If it
	 * is not, reduce functionality accordingly.
	 */
	if (sc->sc_object.ao_pblklen < 5)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (sc->sc_object.ao_pblklen < 6)
		cs[ACPI_STATE_C3].cs_method = 0;

	/*
	 * Sanity check the latency levels in FADT.
	 * Values above the thresholds are used to
	 * inform that C-states are not supported.
	 */
	CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100);
	CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000);

	if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX)
		cs[ACPI_STATE_C3].cs_method = 0;
}

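/*
 * Apply chipset and vendor specific quirks that restrict the use of
 * C3 or change how the caches are kept coherent while in C3.
 */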
static void
acpicpu_cstate_quirks(struct acpicpu_softc *sc)
{
	const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
	const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;

	/*
	 * Disable C3 for PIIX4.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_PIIX4) != 0) {
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
		return;
	}

	/*
	 * Check bus master arbitration. If ARB_DIS
	 * is not available, processor caches must be
	 * flushed before C3 (ACPI 4.0, section 8.2).
	 */
	if (reg != 0 && len != 0) {
		sc->sc_flags |= ACPICPU_FLAG_C_ARB;
		return;
	}

	/*
	 * Disable C3 entirely if WBINVD is not present.
	 */
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
	else {
		/*
		 * If WBINVD is present and functioning properly,
		 * flush all processor caches before entering C3.
		 */
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
			sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
	}
}

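/*
 * Pick a C-state based on how long the CPU slept previously: the
 * deepest enabled state is chosen if the last sleep exceeded its
 * worst-case latency multiplied by a fixed factor, with C1 as the
 * fallback.
 */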
static int
acpicpu_cstate_latency(struct acpicpu_softc *sc)
{
	static const uint32_t cs_factor = 3;
	struct acpicpu_cstate *cs;
	int i;

	for (i = cs_state_max; i > 0; i--) {

		cs = &sc->sc_cstate[i];

		if (__predict_false(cs->cs_method == 0))
			continue;

		/*
		 * Choose a state if we have previously slept
		 * longer than the worst case latency of the
		 * state times an arbitrary multiplier.
		 */
		if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor)
			return i;
	}

	return ACPI_STATE_C1;
}

/*
 * The main idle loop.
 */
void
acpicpu_cstate_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	int state;

	acpi_md_OsDisableInterrupt();

	if (__predict_false(ci->ci_want_resched != 0))
		goto out;

	KASSERT(acpicpu_sc != NULL);
	KASSERT(ci->ci_acpiid < maxcpus);

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL))
		goto out;

	KASSERT(ci->ci_ilevel == IPL_NONE);
	KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0);

	if (__predict_false(sc->sc_cold != false))
		goto out;

	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
		goto out;

	mutex_exit(&sc->sc_mtx);
	state = acpicpu_cstate_latency(sc);

	/*
	 * Apply AMD C1E quirk.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_C1E) != 0)
		acpicpu_md_quirk_c1e();

	/*
	 * Check for bus master activity. Note that particularly usb(4)
	 * causes high activity, which may prevent the use of C3 states.
	 */
	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

		if (acpicpu_cstate_bm_check() != false)
			state--;

		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
			state = ACPI_STATE_C1;
	}

	KASSERT(state != ACPI_STATE_C0);

	if (state != ACPI_STATE_C3) {
		acpicpu_cstate_idle_enter(sc, state);
		return;
	}

	/*
	 * On all recent (Intel) CPUs the caches are shared between
	 * CPUs, and bus master control is required to keep them
	 * coherent while in C3. Flushing the CPU caches is only
	 * a last resort.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
		ACPI_FLUSH_CPU_CACHE();

	/*
	 * Allow the bus master to request that any given
	 * CPU should return immediately to C0 from C3.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

	/*
	 * It may be necessary to disable bus master arbitration
	 * to ensure that bus master cycles do not occur while
	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

	acpicpu_cstate_idle_enter(sc, state);

	/*
	 * Disable bus master wake and re-enable the arbiter.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

	return;

out:
	acpi_md_OsEnableInterrupt();
}

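/*
 * Enter the given C-state, either via the machine-dependent method
 * (FFH or HALT) or by reading the corresponding I/O port, and record
 * the length of the sleep for subsequent state selection.
 */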
static void
acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
{
	struct acpicpu_cstate *cs = &sc->sc_cstate[state];
	uint32_t end, start, val;

	start = acpitimer_read_fast(NULL);

	switch (cs->cs_method) {

	case ACPICPU_C_STATE_FFH:
	case ACPICPU_C_STATE_HALT:
		acpicpu_md_cstate_enter(cs->cs_method, state);
		break;

	case ACPICPU_C_STATE_SYSIO:
		(void)AcpiOsReadPort(cs->cs_addr, &val, 8);
		break;
	}

	acpi_md_OsEnableInterrupt();

	cs->cs_evcnt.ev_count++;
	end = acpitimer_read_fast(NULL);
	sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000;
}

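/*
 * Check whether there has been bus master activity since the last
 * call by reading BM_STS, and clear the bit if it was set.
 */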
static bool
acpicpu_cstate_bm_check(void)
{
	uint32_t val = 0;
	ACPI_STATUS rv;

	rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val);

	if (ACPI_FAILURE(rv) || val == 0)
		return false;

	(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);

	return true;
}