/* $NetBSD: acpi_cpu_cstate.c,v 1.24 2010/08/13 16:21:50 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.24 2010/08/13 16:21:50 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/timetc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>
#include <dev/acpi/acpi_timer.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_cstate")

static void		acpicpu_cstate_attach_print(struct acpicpu_softc *);
static void		acpicpu_cstate_attach_evcnt(struct acpicpu_softc *);
static void		acpicpu_cstate_detach_evcnt(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cstate_cst(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cstate_cst_add(struct acpicpu_softc *,
					       ACPI_OBJECT *);
static void		acpicpu_cstate_cst_bios(void);
static void		acpicpu_cstate_memset(struct acpicpu_softc *);
static void		acpicpu_cstate_fadt(struct acpicpu_softc *);
static void		acpicpu_cstate_quirks(struct acpicpu_softc *);
static int		acpicpu_cstate_quirks_piix4(struct pci_attach_args *);
static int		acpicpu_cstate_latency(struct acpicpu_softc *);
static bool		acpicpu_cstate_bm_check(void);
static void		acpicpu_cstate_idle_enter(struct acpicpu_softc *, int);

extern struct acpicpu_softc **acpicpu_sc;

/*
 * XXX: The local APIC timer (as well as the TSC) is typically stopped
 *	in C3. For now, we have no choice but to disable C3. But there
 *	appear to be timer-related interrupt issues in C2 as well. The
 *	only entirely safe option at the moment is to use C1.
 */
#ifdef ACPICPU_ENABLE_C3
static int cs_state_max = ACPI_STATE_C3;
#else
static int cs_state_max = ACPI_STATE_C1;
#endif

void
acpicpu_cstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	ACPI_STATUS rv;

	/*
	 * Either use the preferred _CST or resort to FADT.
	 */
	rv = acpicpu_cstate_cst(sc);

	switch (rv) {

	case AE_OK:
		acpicpu_cstate_cst_bios();
		break;

	default:
		sc->sc_flags |= ACPICPU_FLAG_C_FADT;
		acpicpu_cstate_fadt(sc);
		break;
	}

	acpicpu_cstate_quirks(sc);
	acpicpu_cstate_attach_evcnt(sc);
	acpicpu_cstate_attach_print(sc);
}

static void
acpicpu_cstate_attach_print(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs;
	static bool once = false;
	const char *str;
	int i;

	if (once != false)
		return;

	for (i = 0; i < ACPI_C_STATE_COUNT; i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method == 0)
			continue;

		switch (cs->cs_method) {

		case ACPICPU_C_STATE_HALT:
			str = "HLT";
			break;

		case ACPICPU_C_STATE_FFH:
			str = "FFH";
			break;

		case ACPICPU_C_STATE_SYSIO:
			str = "I/O";
			break;

		default:
			panic("NOTREACHED");
		}

		aprint_debug_dev(sc->sc_dev, "C%d: %3s, "
		    "lat %3u us, pow %5u mW, flags 0x%02x\n", i, str,
		    cs->cs_latency, cs->cs_power, cs->cs_flags);
	}

	once = true;
}

static void
acpicpu_cstate_attach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs;
	const char *str;
	int i;

	for (i = 0; i < ACPI_C_STATE_COUNT; i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method == 0)
			continue;

		str = "HALT";

		if (cs->cs_method == ACPICPU_C_STATE_FFH)
			str = "MWAIT";

		if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
			str = "I/O";

		(void)snprintf(cs->cs_name, sizeof(cs->cs_name),
		    "C%d (%s)", i, str);

		evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), cs->cs_name);
	}
}

int
acpicpu_cstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	int rv;

	rv = RUN_ONCE(&once_detach, acpicpu_md_idle_stop);

	if (rv != 0)
		return rv;

	sc->sc_flags &= ~ACPICPU_FLAG_C;
	acpicpu_cstate_detach_evcnt(sc);

	return 0;
}

static void
acpicpu_cstate_detach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs;
	int i;

	for (i = 0; i < ACPI_C_STATE_COUNT; i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method != 0)
			evcnt_detach(&cs->cs_evcnt);
	}
}

int
acpicpu_cstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_start);
	int rv;

	/*
	 * Save the existing idle-mechanism and claim the cpu_idle(9).
	 * This should be called after all ACPI CPUs have been attached.
	 */
	rv = RUN_ONCE(&once_start, acpicpu_md_idle_start);

	if (rv == 0)
		sc->sc_flags |= ACPICPU_FLAG_C;

	return rv;
}

bool
acpicpu_cstate_suspend(device_t self)
{

	return true;
}

bool
acpicpu_cstate_resume(device_t self)
{
	static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_cstate_callback;
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) == 0)
		(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);

	return true;
}

void
acpicpu_cstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0)
		return;

	mutex_enter(&sc->sc_mtx);
	(void)acpicpu_cstate_cst(sc);
	mutex_exit(&sc->sc_mtx);
}

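/*
 * For orientation, a sketch of the _CST package layout that the
 * parser below expects (ACPI 4.0, section 8.4.2.1). The ASL is
 * illustrative only, not taken from any real firmware:
 *
 *	Name (_CST, Package () {
 *		2,					// entry count
 *		Package () {
 *			ResourceTemplate () {		// register (GAS)
 *				Register (FFixedHW, 0x01, 0x02, 0, 0x01)
 *			},
 *			1,				// type (here C1)
 *			1,				// latency (us)
 *			1000				// power (mW)
 *		},
 *		Package () { ... }			// one per C-state
 *	})
 */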
static ACPI_STATUS
acpicpu_cstate_cst(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i, n;
	uint8_t count;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count < 2) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = obj->Package.Elements;

	if (elm[0].Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm[0].Integer.Value;

	if (n != obj->Package.Count - 1) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	if (n > ACPI_C_STATES_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	acpicpu_cstate_memset(sc);

	CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1);
	CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3);

	for (count = 0, i = 1; i <= n; i++) {

		elm = &obj->Package.Elements[i];
		rv = acpicpu_cstate_cst_add(sc, elm);

		if (ACPI_SUCCESS(rv))
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static ACPI_STATUS
acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm)
{
	const struct acpicpu_object *ao = &sc->sc_object;
	struct acpicpu_cstate *cs = sc->sc_cstate;
	struct acpicpu_cstate state;
	struct acpicpu_reg *reg;
	ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT *obj;
	uint32_t type;

	(void)memset(&state, 0, sizeof(state));

	state.cs_flags = ACPICPU_FLAG_C_BM_STS;

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (elm->Package.Count != 4) {
		rv = AE_LIMIT;
		goto out;
	}

	/*
	 * Type.
	 */
	obj = &elm->Package.Elements[1];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	type = obj->Integer.Value;

	if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) {
		rv = AE_TYPE;
		goto out;
	}

	/*
	 * Latency.
	 */
	obj = &elm->Package.Elements[2];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_latency = obj->Integer.Value;

	/*
	 * Power.
	 */
	obj = &elm->Package.Elements[3];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_power = obj->Integer.Value;

	/*
	 * Register.
	 */
	obj = &elm->Package.Elements[0];

	if (obj->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	CTASSERT(sizeof(struct acpicpu_reg) == 15);

	if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) {
		rv = AE_LIMIT;
		goto out;
	}

	reg = (struct acpicpu_reg *)obj->Buffer.Pointer;

	switch (reg->reg_spaceid) {

	case ACPI_ADR_SPACE_SYSTEM_IO:
		state.cs_method = ACPICPU_C_STATE_SYSIO;

		if (reg->reg_addr == 0) {
			rv = AE_AML_ILLEGAL_ADDRESS;
			goto out;
		}

		if (reg->reg_bitwidth != 8) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		/*
		 * Check only that the address is in the mapped space.
		 * Systems are allowed to change it when operating
		 * with _CST (see ACPI 4.0, pp. 94-95). For instance,
		 * the offset of P_LVL3 may change depending on whether
		 * acpiacad(4) is connected or disconnected.
		 */
		if (reg->reg_addr > ao->ao_pblkaddr + ao->ao_pblklen) {
			rv = AE_BAD_ADDRESS;
			goto out;
		}

		state.cs_addr = reg->reg_addr;
		break;

	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		state.cs_method = ACPICPU_C_STATE_FFH;

		switch (type) {

		case ACPI_STATE_C1:

			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0)
				state.cs_method = ACPICPU_C_STATE_HALT;

			break;

		default:

			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}
		}

		if (sc->sc_cap != 0) {

			/*
			 * The _CST FFH GAS encoding may contain
			 * additional hints on Intel processors.
			 * Use these to determine whether we can
			 * avoid the bus master activity check.
			 */
			if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0)
				state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
		}

		break;

	default:
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	if (cs[type].cs_method != 0) {
		rv = AE_ALREADY_EXISTS;
		goto out;
	}

	cs[type].cs_addr = state.cs_addr;
	cs[type].cs_power = state.cs_power;
	cs[type].cs_flags = state.cs_flags;
	cs[type].cs_method = state.cs_method;
	cs[type].cs_latency = state.cs_latency;

out:
	if (ACPI_FAILURE(rv))
		aprint_debug_dev(sc->sc_dev, "invalid "
		    "_CST: %s\n", AcpiFormatException(rv));

	return rv;
}

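/*
 * Tell the firmware that the OS supports _CST: per the FADT fields
 * CST_CNT and SMI_CMD (ACPI 4.0, section 5.2.9), writing the former
 * to the latter enables the "C States Changed" notification (0x81),
 * which is serviced by re-evaluating _CST (cf. the callback above).
 */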
static void
acpicpu_cstate_cst_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.CstControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

static void
acpicpu_cstate_memset(struct acpicpu_softc *sc)
{
	int i = 0;

	while (i < ACPI_C_STATE_COUNT) {

		sc->sc_cstate[i].cs_addr = 0;
		sc->sc_cstate[i].cs_power = 0;
		sc->sc_cstate[i].cs_flags = 0;
		sc->sc_cstate[i].cs_method = 0;
		sc->sc_cstate[i].cs_latency = 0;

		i++;
	}
}

static void
acpicpu_cstate_fadt(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;

	acpicpu_cstate_memset(sc);

	/*
	 * All x86 processors should support C1 (a.k.a. HALT).
	 */
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) != 0)
		cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;

	if (sc->sc_object.ao_pblkaddr == 0)
		return;

	if (acpicpu_md_cpus_running() > 1) {

		if ((AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
			return;
	}

	cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
	cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;

	cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency;
	cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency;

	cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4;
	cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5;
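
	/*
	 * For reference, the P_BLK register block layout behind the
	 * offsets used above, as defined via the FADT (ACPI 4.0):
	 *
	 *	offset 0:	P_CNT  (4 bytes, processor control)
	 *	offset 4:	P_LVL2 (1 byte; a read enters C2)
	 *	offset 5:	P_LVL3 (1 byte; a read enters C3)
	 */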

	/*
	 * The P_BLK length should always be 6. If it is
	 * not, reduce functionality accordingly. Also
	 * sanity check the latency levels from the FADT.
	 */
	if (sc->sc_object.ao_pblklen < 5)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (sc->sc_object.ao_pblklen < 6)
		cs[ACPI_STATE_C3].cs_method = 0;

	CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100);
	CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000);

	if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX)
		cs[ACPI_STATE_C3].cs_method = 0;
}

static void
acpicpu_cstate_quirks(struct acpicpu_softc *sc)
{
	const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
	const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;
	struct pci_attach_args pa;

	/*
	 * Check bus master arbitration. If ARB_DIS
	 * is not available, processor caches must be
	 * flushed before C3 (ACPI 4.0, section 8.2).
	 */
	if (reg != 0 && len != 0)
		sc->sc_flags |= ACPICPU_FLAG_C_ARB;
	else {
		/*
		 * Disable C3 entirely if WBINVD is not present.
		 */
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
			sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
		else {
			/*
			 * If WBINVD is present and functioning properly,
			 * flush all processor caches before entering C3.
			 */
			if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
				sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
			else
				sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
		}
	}

	/*
	 * There are several errata for PIIX4.
	 */
	if (pci_find_device(&pa, acpicpu_cstate_quirks_piix4) != 0)
		sc->sc_flags |= ACPICPU_FLAG_C_NOC3;

	if ((sc->sc_flags & ACPICPU_FLAG_C_NOC3) != 0)
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
}

static int
acpicpu_cstate_quirks_piix4(struct pci_attach_args *pa)
{

	/*
	 * XXX: The pci_find_device(9) function only deals with
	 *	attached devices. Change this to use something like
	 *	pci_device_foreach(), and implement it for IA-64.
	 */
	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
		return 0;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82371AB_ISA ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82440MX_PMC)
		return 1;

	return 0;
}

static int
acpicpu_cstate_latency(struct acpicpu_softc *sc)
{
	static const uint32_t cs_factor = 3;
	struct acpicpu_cstate *cs;
	int i;

	for (i = cs_state_max; i > 0; i--) {

		cs = &sc->sc_cstate[i];

		if (__predict_false(cs->cs_method == 0))
			continue;

		/*
		 * Choose a state if we have previously slept
		 * longer than the worst case latency of the
		 * state times an arbitrary multiplier.
		 */
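		/*
		 * For example, with cs_factor = 3 and a C3 worst-case
		 * latency of 100 us, C3 is chosen only if the previous
		 * sleep lasted longer than 300 us; otherwise the loop
		 * falls through towards C2 and C1.
		 */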
		if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor)
			return i;
	}

	return ACPI_STATE_C1;
}

/*
 * The main idle loop.
 */
void
acpicpu_cstate_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	int state;

	if (__predict_false(ci->ci_want_resched != 0))
		return;

	acpi_md_OsDisableInterrupt();

	KASSERT(acpicpu_sc != NULL);
	KASSERT(ci->ci_acpiid < maxcpus);
	KASSERT(ci->ci_ilevel == IPL_NONE);

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL))
		goto halt;

	if (__predict_false(sc->sc_cold != false))
		goto halt;

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_C) == 0))
		goto halt;

	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
		goto halt;

	mutex_exit(&sc->sc_mtx);
	state = acpicpu_cstate_latency(sc);

	/*
	 * Check for bus master activity. Note that usb(4) in
	 * particular causes high activity, which may prevent
	 * the use of C3 states.
	 */
	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

		if (acpicpu_cstate_bm_check() != false)
			state--;

		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
			state = ACPI_STATE_C1;
	}

	KASSERT(state != ACPI_STATE_C0);

	if (state != ACPI_STATE_C3) {
		acpicpu_cstate_idle_enter(sc, state);
		return;
	}

	/*
	 * On all recent (Intel) CPUs caches are shared
	 * by CPUs and bus master control is required to
	 * keep these coherent while in C3. Flushing the
	 * CPU caches is only the last resort.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
		ACPI_FLUSH_CPU_CACHE();

	/*
	 * Allow the bus master to request that any given
	 * CPU should return immediately to C0 from C3.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

	/*
	 * It may be necessary to disable bus master arbitration
	 * to ensure that bus master cycles do not occur while
	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

	acpicpu_cstate_idle_enter(sc, state);

	/*
	 * Disable bus master wake and re-enable the arbiter.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

	return;

halt:
	acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
}

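/*
 * Enter the given C-state and estimate the time slept in it. Note
 * that for ACPICPU_C_STATE_SYSIO the read from the P_LVLx register
 * itself triggers the transition; the value read is irrelevant
 * (ACPI 4.0, section 8.1). The duration is measured with the ACPI
 * power management timer and recorded in sc_cstate_sleep for use
 * by acpicpu_cstate_latency().
 */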
static void
acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
{
	struct acpicpu_cstate *cs = &sc->sc_cstate[state];
	uint32_t end, start, val;

	start = acpitimer_read_safe(NULL);

	switch (cs->cs_method) {

	case ACPICPU_C_STATE_FFH:
	case ACPICPU_C_STATE_HALT:
		acpicpu_md_idle_enter(cs->cs_method, state);
		break;

	case ACPICPU_C_STATE_SYSIO:
		(void)AcpiOsReadPort(cs->cs_addr, &val, 8);
		break;

	default:
		acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
		break;
	}

	cs->cs_evcnt.ev_count++;

	end = acpitimer_read_safe(NULL);
	sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000;

	acpi_md_OsEnableInterrupt();
}

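/*
 * Check for and acknowledge bus master activity. The BM_STS bit in
 * the PM1 status register is set by the hardware upon bus master
 * activity and cleared by writing a one to it; this returns true
 * if there was activity since the bit was last cleared.
 */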
static bool
acpicpu_cstate_bm_check(void)
{
	uint32_t val = 0;
	ACPI_STATUS rv;

	rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val);

	if (ACPI_FAILURE(rv) || val == 0)
		return false;

	(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);

	return true;
}