acpi_cpu_cstate.c revision 1.48 1 /* $NetBSD: acpi_cpu_cstate.c,v 1.48 2011/03/01 05:37:02 jruoho Exp $ */
2
3 /*-
4 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen (at) iki.fi>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.48 2011/03/01 05:37:02 jruoho Exp $");
31
32 #include <sys/param.h>
33 #include <sys/cpu.h>
34 #include <sys/device.h>
35 #include <sys/evcnt.h>
36 #include <sys/kernel.h>
37 #include <sys/once.h>
38 #include <sys/mutex.h>
39 #include <sys/timetc.h>
40
41 #include <dev/acpi/acpireg.h>
42 #include <dev/acpi/acpivar.h>
43 #include <dev/acpi/acpi_cpu.h>
44 #include <dev/acpi/acpi_timer.h>
45
46 #include <machine/acpi_machdep.h>
47
48 #define _COMPONENT ACPI_BUS_COMPONENT
49 ACPI_MODULE_NAME ("acpi_cpu_cstate")
50
51 static void acpicpu_cstate_attach_evcnt(struct acpicpu_softc *);
52 static void acpicpu_cstate_detach_evcnt(struct acpicpu_softc *);
53 static ACPI_STATUS acpicpu_cstate_cst(struct acpicpu_softc *);
54 static ACPI_STATUS acpicpu_cstate_cst_add(struct acpicpu_softc *,
55 ACPI_OBJECT *, int );
56 static void acpicpu_cstate_cst_bios(void);
57 static void acpicpu_cstate_memset(struct acpicpu_softc *);
58 static ACPI_STATUS acpicpu_cstate_dep(struct acpicpu_softc *);
59 static void acpicpu_cstate_fadt(struct acpicpu_softc *);
60 static void acpicpu_cstate_quirks(struct acpicpu_softc *);
61 static int acpicpu_cstate_latency(struct acpicpu_softc *);
62 static bool acpicpu_cstate_bm_check(void);
63 static void acpicpu_cstate_idle_enter(struct acpicpu_softc *,int);
64
65 extern struct acpicpu_softc **acpicpu_sc;
66
67 /*
68 * XXX: The local APIC timer (as well as TSC) is typically stopped in C3.
69 * For now, we cannot but disable C3. But there appears to be timer-
70 * related interrupt issues also in C2. The only entirely safe option
71 * at the moment is to use C1.
72 */
73 #ifdef ACPICPU_ENABLE_C3
74 static int cs_state_max = ACPI_STATE_C3;
75 #else
76 static int cs_state_max = ACPI_STATE_C1;
77 #endif
78
79 void
80 acpicpu_cstate_attach(device_t self)
81 {
82 struct acpicpu_softc *sc = device_private(self);
83 ACPI_STATUS rv;
84
85 /*
86 * Either use the preferred _CST or resort to FADT.
87 */
88 rv = acpicpu_cstate_cst(sc);
89
90 switch (rv) {
91
92 case AE_OK:
93 acpicpu_cstate_cst_bios();
94 break;
95
96 default:
97 sc->sc_flags |= ACPICPU_FLAG_C_FADT;
98 acpicpu_cstate_fadt(sc);
99 break;
100 }
101
102 /*
103 * Query the optional _CSD.
104 */
105 rv = acpicpu_cstate_dep(sc);
106
107 if (ACPI_SUCCESS(rv))
108 sc->sc_flags |= ACPICPU_FLAG_C_DEP;
109
110 sc->sc_flags |= ACPICPU_FLAG_C;
111
112 acpicpu_cstate_quirks(sc);
113 acpicpu_cstate_attach_evcnt(sc);
114 }
115
116 static void
117 acpicpu_cstate_attach_evcnt(struct acpicpu_softc *sc)
118 {
119 struct acpicpu_cstate *cs;
120 const char *str;
121 uint8_t i;
122
123 for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
124
125 cs = &sc->sc_cstate[i];
126
127 if (cs->cs_method == 0)
128 continue;
129
130 str = "HALT";
131
132 if (cs->cs_method == ACPICPU_C_STATE_FFH)
133 str = "MWAIT";
134
135 if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
136 str = "I/O";
137
138 (void)snprintf(cs->cs_name, sizeof(cs->cs_name),
139 "C%d (%s)", i, str);
140
141 evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
142 NULL, device_xname(sc->sc_dev), cs->cs_name);
143 }
144 }
145
146 int
147 acpicpu_cstate_detach(device_t self)
148 {
149 struct acpicpu_softc *sc = device_private(self);
150 static ONCE_DECL(once_detach);
151 int rv;
152
153 rv = RUN_ONCE(&once_detach, acpicpu_md_cstate_stop);
154
155 if (rv != 0)
156 return rv;
157
158 sc->sc_flags &= ~ACPICPU_FLAG_C;
159 acpicpu_cstate_detach_evcnt(sc);
160
161 return 0;
162 }
163
164 static void
165 acpicpu_cstate_detach_evcnt(struct acpicpu_softc *sc)
166 {
167 struct acpicpu_cstate *cs;
168 uint8_t i;
169
170 for (i = 0; i < __arraycount(sc->sc_cstate); i++) {
171
172 cs = &sc->sc_cstate[i];
173
174 if (cs->cs_method != 0)
175 evcnt_detach(&cs->cs_evcnt);
176 }
177 }
178
179 void
180 acpicpu_cstate_start(device_t self)
181 {
182 struct acpicpu_softc *sc = device_private(self);
183
184 (void)acpicpu_md_cstate_start(sc);
185 }
186
187 bool
188 acpicpu_cstate_suspend(device_t self)
189 {
190 return true;
191 }
192
193 bool
194 acpicpu_cstate_resume(device_t self)
195 {
196 static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_cstate_callback;
197 struct acpicpu_softc *sc = device_private(self);
198
199 if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) == 0)
200 (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
201
202 return true;
203 }
204
205 void
206 acpicpu_cstate_callback(void *aux)
207 {
208 struct acpicpu_softc *sc;
209 device_t self = aux;
210
211 sc = device_private(self);
212
213 if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0)
214 return;
215
216 mutex_enter(&sc->sc_mtx);
217 (void)acpicpu_cstate_cst(sc);
218 mutex_exit(&sc->sc_mtx);
219 }
220
221 static ACPI_STATUS
222 acpicpu_cstate_cst(struct acpicpu_softc *sc)
223 {
224 ACPI_OBJECT *elm, *obj;
225 ACPI_BUFFER buf;
226 ACPI_STATUS rv;
227 uint32_t i, n;
228 uint8_t count;
229
230 rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);
231
232 if (ACPI_FAILURE(rv))
233 return rv;
234
235 obj = buf.Pointer;
236
237 if (obj->Type != ACPI_TYPE_PACKAGE) {
238 rv = AE_TYPE;
239 goto out;
240 }
241
242 if (obj->Package.Count < 2) {
243 rv = AE_LIMIT;
244 goto out;
245 }
246
247 elm = obj->Package.Elements;
248
249 if (elm[0].Type != ACPI_TYPE_INTEGER) {
250 rv = AE_TYPE;
251 goto out;
252 }
253
254 n = elm[0].Integer.Value;
255
256 if (n != obj->Package.Count - 1) {
257 rv = AE_BAD_VALUE;
258 goto out;
259 }
260
261 if (n > ACPI_C_STATES_MAX) {
262 rv = AE_LIMIT;
263 goto out;
264 }
265
266 acpicpu_cstate_memset(sc);
267
268 CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1);
269 CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3);
270
271 for (count = 0, i = 1; i <= n; i++) {
272
273 elm = &obj->Package.Elements[i];
274 rv = acpicpu_cstate_cst_add(sc, elm, i);
275
276 if (ACPI_SUCCESS(rv))
277 count++;
278 }
279
280 rv = (count != 0) ? AE_OK : AE_NOT_EXIST;
281
282 out:
283 if (buf.Pointer != NULL)
284 ACPI_FREE(buf.Pointer);
285
286 return rv;
287 }
288
289 static ACPI_STATUS
290 acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm, int i)
291 {
292 struct acpicpu_cstate *cs = sc->sc_cstate;
293 struct acpicpu_cstate state;
294 struct acpicpu_reg *reg;
295 ACPI_STATUS rv = AE_OK;
296 ACPI_OBJECT *obj;
297 uint32_t type;
298
299 (void)memset(&state, 0, sizeof(*cs));
300
301 state.cs_flags = ACPICPU_FLAG_C_BM_STS;
302
303 if (elm->Type != ACPI_TYPE_PACKAGE) {
304 rv = AE_TYPE;
305 goto out;
306 }
307
308 if (elm->Package.Count != 4) {
309 rv = AE_LIMIT;
310 goto out;
311 }
312
313 /*
314 * Type.
315 */
316 obj = &elm->Package.Elements[1];
317
318 if (obj->Type != ACPI_TYPE_INTEGER) {
319 rv = AE_TYPE;
320 goto out;
321 }
322
323 type = obj->Integer.Value;
324
325 if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) {
326 rv = AE_TYPE;
327 goto out;
328 }
329
330 /*
331 * Latency.
332 */
333 obj = &elm->Package.Elements[2];
334
335 if (obj->Type != ACPI_TYPE_INTEGER) {
336 rv = AE_TYPE;
337 goto out;
338 }
339
340 state.cs_latency = obj->Integer.Value;
341
342 /*
343 * Power.
344 */
345 obj = &elm->Package.Elements[3];
346
347 if (obj->Type != ACPI_TYPE_INTEGER) {
348 rv = AE_TYPE;
349 goto out;
350 }
351
352 state.cs_power = obj->Integer.Value;
353
354 /*
355 * Register.
356 */
357 obj = &elm->Package.Elements[0];
358
359 if (obj->Type != ACPI_TYPE_BUFFER) {
360 rv = AE_TYPE;
361 goto out;
362 }
363
364 CTASSERT(sizeof(struct acpicpu_reg) == 15);
365
366 if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) {
367 rv = AE_LIMIT;
368 goto out;
369 }
370
371 reg = (struct acpicpu_reg *)obj->Buffer.Pointer;
372
373 switch (reg->reg_spaceid) {
374
375 case ACPI_ADR_SPACE_SYSTEM_IO:
376 state.cs_method = ACPICPU_C_STATE_SYSIO;
377
378 if (reg->reg_addr == 0) {
379 rv = AE_AML_ILLEGAL_ADDRESS;
380 goto out;
381 }
382
383 if (reg->reg_bitwidth != 8) {
384 rv = AE_AML_BAD_RESOURCE_LENGTH;
385 goto out;
386 }
387
388 state.cs_addr = reg->reg_addr;
389 break;
390
391 case ACPI_ADR_SPACE_FIXED_HARDWARE:
392 state.cs_method = ACPICPU_C_STATE_FFH;
393
394 switch (type) {
395
396 case ACPI_STATE_C1:
397
398 if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0)
399 state.cs_method = ACPICPU_C_STATE_HALT;
400
401 break;
402
403 default:
404
405 if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) {
406 rv = AE_SUPPORT;
407 goto out;
408 }
409 }
410
411 if (sc->sc_cap != 0) {
412
413 /*
414 * The _CST FFH GAS encoding may contain
415 * additional hints on Intel processors.
416 * Use these to determine whether we can
417 * avoid the bus master activity check.
418 */
419 if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0)
420 state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
421 }
422
423 break;
424
425 default:
426 rv = AE_AML_INVALID_SPACE_ID;
427 goto out;
428 }
429
430 /*
431 * As some systems define the type arbitrarily,
432 * we use a sequential counter instead of the
433 * BIOS data. For instance, AMD family 14h is
434 * instructed to only use the value 2; see
435 *
436 * Advanced Micro Devices: BIOS and Kernel
437 * Developer's Guide (BKDG) for AMD Family
438 * 14h Models 00h-0Fh Processors. Revision
439 * 3.00, January 4, 2011.
440 */
441 if (i != (int)type) {
442
443 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
444 "C%d != C%u from BIOS", i, type));
445 }
446
447 KASSERT(cs[i].cs_method == 0);
448
449 cs[i].cs_addr = state.cs_addr;
450 cs[i].cs_power = state.cs_power;
451 cs[i].cs_flags = state.cs_flags;
452 cs[i].cs_method = state.cs_method;
453 cs[i].cs_latency = state.cs_latency;
454
455 out:
456 if (ACPI_FAILURE(rv))
457 aprint_error_dev(sc->sc_dev, "failed to add "
458 "C-state: %s\n", AcpiFormatException(rv));
459
460 i++;
461
462 return rv;
463 }
464
465 static void
466 acpicpu_cstate_cst_bios(void)
467 {
468 const uint8_t val = AcpiGbl_FADT.CstControl;
469 const uint32_t addr = AcpiGbl_FADT.SmiCommand;
470
471 if (addr == 0 || val == 0)
472 return;
473
474 (void)AcpiOsWritePort(addr, val, 8);
475 }
476
477 static void
478 acpicpu_cstate_memset(struct acpicpu_softc *sc)
479 {
480 uint8_t i = 0;
481
482 while (i < __arraycount(sc->sc_cstate)) {
483
484 sc->sc_cstate[i].cs_addr = 0;
485 sc->sc_cstate[i].cs_power = 0;
486 sc->sc_cstate[i].cs_flags = 0;
487 sc->sc_cstate[i].cs_method = 0;
488 sc->sc_cstate[i].cs_latency = 0;
489
490 i++;
491 }
492 }
493
494 static ACPI_STATUS
495 acpicpu_cstate_dep(struct acpicpu_softc *sc)
496 {
497 ACPI_OBJECT *elm, *obj;
498 ACPI_BUFFER buf;
499 ACPI_STATUS rv;
500 uint32_t val;
501 uint8_t i, n;
502
503 rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CSD", &buf);
504
505 if (ACPI_FAILURE(rv))
506 goto out;
507
508 obj = buf.Pointer;
509
510 if (obj->Type != ACPI_TYPE_PACKAGE) {
511 rv = AE_TYPE;
512 goto out;
513 }
514
515 if (obj->Package.Count != 1) {
516 rv = AE_LIMIT;
517 goto out;
518 }
519
520 elm = &obj->Package.Elements[0];
521
522 if (obj->Type != ACPI_TYPE_PACKAGE) {
523 rv = AE_TYPE;
524 goto out;
525 }
526
527 n = elm->Package.Count;
528
529 if (n != 6) {
530 rv = AE_LIMIT;
531 goto out;
532 }
533
534 elm = elm->Package.Elements;
535
536 for (i = 0; i < n; i++) {
537
538 if (elm[i].Type != ACPI_TYPE_INTEGER) {
539 rv = AE_TYPE;
540 goto out;
541 }
542
543 if (elm[i].Integer.Value > UINT32_MAX) {
544 rv = AE_AML_NUMERIC_OVERFLOW;
545 goto out;
546 }
547 }
548
549 val = elm[1].Integer.Value;
550
551 if (val != 0)
552 aprint_debug_dev(sc->sc_dev, "invalid revision in _CSD\n");
553
554 val = elm[3].Integer.Value;
555
556 if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) {
557 rv = AE_AML_BAD_RESOURCE_VALUE;
558 goto out;
559 }
560
561 val = elm[4].Integer.Value;
562
563 if (val > sc->sc_ncpus) {
564 rv = AE_BAD_VALUE;
565 goto out;
566 }
567
568 sc->sc_cstate_dep.dep_domain = elm[2].Integer.Value;
569 sc->sc_cstate_dep.dep_type = elm[3].Integer.Value;
570 sc->sc_cstate_dep.dep_ncpus = elm[4].Integer.Value;
571 sc->sc_cstate_dep.dep_index = elm[5].Integer.Value;
572
573 out:
574 if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
575 aprint_debug_dev(sc->sc_dev, "failed to evaluate "
576 "_CSD: %s\n", AcpiFormatException(rv));
577
578 if (buf.Pointer != NULL)
579 ACPI_FREE(buf.Pointer);
580
581 return rv;
582 }
583
584 static void
585 acpicpu_cstate_fadt(struct acpicpu_softc *sc)
586 {
587 struct acpicpu_cstate *cs = sc->sc_cstate;
588
589 acpicpu_cstate_memset(sc);
590
591 /*
592 * All x86 processors should support C1 (a.k.a. HALT).
593 */
594 cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;
595
596 if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) == 0)
597 aprint_debug_dev(sc->sc_dev, "HALT not supported?\n");
598
599 if (sc->sc_object.ao_pblkaddr == 0)
600 return;
601
602 if (sc->sc_ncpus > 1) {
603
604 if ((AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
605 return;
606 }
607
608 cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
609 cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;
610
611 cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency;
612 cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency;
613
614 cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4;
615 cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5;
616
617 /*
618 * The P_BLK length should always be 6. If it
619 * is not, reduce functionality accordingly.
620 */
621 if (sc->sc_object.ao_pblklen < 5)
622 cs[ACPI_STATE_C2].cs_method = 0;
623
624 if (sc->sc_object.ao_pblklen < 6)
625 cs[ACPI_STATE_C3].cs_method = 0;
626
627 /*
628 * Sanity check the latency levels in FADT.
629 * Values above the thresholds are used to
630 * inform that C-states are not supported.
631 */
632 CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100);
633 CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000);
634
635 if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX)
636 cs[ACPI_STATE_C2].cs_method = 0;
637
638 if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX)
639 cs[ACPI_STATE_C3].cs_method = 0;
640 }
641
/*
 * Apply chipset- and CPU-specific restrictions to the C-state
 * table built earlier. May clear the C3 method and adjust the
 * bus master related flags in sc->sc_flags.
 */
static void
acpicpu_cstate_quirks(struct acpicpu_softc *sc)
{
	const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
	const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;

	/*
	 * Disable C3 for PIIX4.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_PIIX4) != 0) {
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
		return;
	}

	/*
	 * Check bus master arbitration. If ARB_DIS
	 * is not available, processor caches must be
	 * flushed before C3 (ACPI 4.0, section 8.2).
	 */
	if (reg != 0 && len != 0) {
		sc->sc_flags |= ACPICPU_FLAG_C_ARB;
		return;
	}

	/*
	 * Disable C3 entirely if WBINVD is not present.
	 */
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
	else {
		/*
		 * If WBINVD is present and functioning properly,
		 * flush all processor caches before entering C3.
		 *
		 * NOTE(review): the branch polarity looks inverted --
		 * C3 is disabled when ACPI_FADT_WBINVD_FLUSH is SET,
		 * and kept (with ACPICPU_FLAG_C_BM cleared) when it is
		 * clear. Confirm against the ACPI FADT flag semantics
		 * before changing anything here.
		 */
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
			sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
		else
			sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
	}
}
682
683 static int
684 acpicpu_cstate_latency(struct acpicpu_softc *sc)
685 {
686 static const uint32_t cs_factor = 3;
687 struct acpicpu_cstate *cs;
688 int i;
689
690 for (i = cs_state_max; i > 0; i--) {
691
692 cs = &sc->sc_cstate[i];
693
694 if (__predict_false(cs->cs_method == 0))
695 continue;
696
697 /*
698 * Choose a state if we have previously slept
699 * longer than the worst case latency of the
700 * state times an arbitrary multiplier.
701 */
702 if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor)
703 return i;
704 }
705
706 return ACPI_STATE_C1;
707 }
708
/*
 * The main idle loop. Called with interrupts enabled; interrupts
 * are disabled on entry and re-enabled either here (on the bail-out
 * path) or inside acpicpu_cstate_idle_enter() after wakeup.
 */
void
acpicpu_cstate_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	int state;

	acpi_md_OsDisableInterrupt();

	/* Do not sleep if a reschedule is already pending. */
	if (__predict_false(ci->ci_want_resched != 0))
		goto out;

	KASSERT(acpicpu_sc != NULL);
	KASSERT(ci->ci_acpiid < maxcpus);

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL))
		goto out;

	KASSERT(ci->ci_ilevel == IPL_NONE);
	KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0);

	if (__predict_false(sc->sc_cold != false))
		goto out;

	/*
	 * Probe the mutex and release it immediately: if the lock is
	 * held, the state table is presumably being updated (e.g. by
	 * the _CST callback), so skip this idle entry rather than
	 * block with interrupts off.
	 */
	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
		goto out;

	mutex_exit(&sc->sc_mtx);
	state = acpicpu_cstate_latency(sc);

	/*
	 * Apply AMD C1E quirk.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_C1E) != 0)
		acpicpu_md_quirk_c1e();

	/*
	 * Check for bus master activity. Note that particularly usb(4)
	 * causes high activity, which may prevent the use of C3 states.
	 * If activity was seen, demote one level; fall back to C1 if
	 * the demoted state is not populated.
	 */
	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

		if (acpicpu_cstate_bm_check() != false)
			state--;

		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
			state = ACPI_STATE_C1;
	}

	KASSERT(state != ACPI_STATE_C0);

	/* C1 and C2 need no cache or bus master preparation. */
	if (state != ACPI_STATE_C3) {
		acpicpu_cstate_idle_enter(sc, state);
		return;
	}

	/*
	 * On all recent (Intel) CPUs caches are shared
	 * by CPUs and bus master control is required to
	 * keep these coherent while in C3. Flushing the
	 * CPU caches is only the last resort.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
		ACPI_FLUSH_CPU_CACHE();

	/*
	 * Allow the bus master to request that any given
	 * CPU should return immediately to C0 from C3.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

	/*
	 * It may be necessary to disable bus master arbitration
	 * to ensure that bus master cycles do not occur while
	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

	acpicpu_cstate_idle_enter(sc, state);

	/*
	 * Disable bus master wake and re-enable the arbiter.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

	return;

out:
	acpi_md_OsEnableInterrupt();
}
810
811 static void
812 acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
813 {
814 struct acpicpu_cstate *cs = &sc->sc_cstate[state];
815 uint32_t end, start, val;
816
817 start = acpitimer_read_fast(NULL);
818
819 switch (cs->cs_method) {
820
821 case ACPICPU_C_STATE_FFH:
822 case ACPICPU_C_STATE_HALT:
823 acpicpu_md_cstate_enter(cs->cs_method, state);
824 break;
825
826 case ACPICPU_C_STATE_SYSIO:
827 (void)AcpiOsReadPort(cs->cs_addr, &val, 8);
828 break;
829 }
830
831 acpi_md_OsEnableInterrupt();
832
833 cs->cs_evcnt.ev_count++;
834 end = acpitimer_read_fast(NULL);
835 sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000;
836 }
837
838 static bool
839 acpicpu_cstate_bm_check(void)
840 {
841 uint32_t val = 0;
842 ACPI_STATUS rv;
843
844 rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val);
845
846 if (ACPI_FAILURE(rv) || val == 0)
847 return false;
848
849 (void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
850
851 return true;
852 }
853