/* $NetBSD: acpi_cpu_cstate.c,v 1.63 2020/12/07 10:57:41 jmcneill Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen (at) iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.63 2020/12/07 10:57:41 jmcneill Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/timetc.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>
#include <dev/acpi/acpi_timer.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT	 ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	 ("acpi_cpu_cstate")

static ACPI_STATUS	 acpicpu_cstate_cst(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_cstate_cst_add(struct acpicpu_softc *,
						ACPI_OBJECT *);
static void		 acpicpu_cstate_cst_bios(void);
static void		 acpicpu_cstate_memset(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_cstate_dep(struct acpicpu_softc *);
static void		 acpicpu_cstate_fadt(struct acpicpu_softc *);
static void		 acpicpu_cstate_quirks(struct acpicpu_softc *);
static int		 acpicpu_cstate_latency(struct acpicpu_softc *);
static bool		 acpicpu_cstate_bm_check(void);
static void		 acpicpu_cstate_idle_enter(struct acpicpu_softc *, int);

extern struct acpicpu_softc **acpicpu_sc;

/*
 * XXX:	The local APIC timer (as well as the TSC) is typically stopped
 *	in C3. For now, we have no option but to disable C3. There also
 *	appear to be timer-related interrupt issues in C2. The only
 *	entirely safe option at the moment is C1.
 */
#ifdef ACPICPU_ENABLE_C3
static int cs_state_max = ACPI_STATE_C3;
#else
static int cs_state_max = ACPI_STATE_C1;
#endif
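
/*
 * Note that states deeper than C1 are considered only when the kernel
 * is built with ACPICPU_ENABLE_C3 defined; cs_state_max caps the
 * search done by acpicpu_cstate_latency() below.
 */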

void
acpicpu_cstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	ACPI_STATUS rv;

	/*
	 * Either use the preferred _CST or resort to FADT.
	 */
	rv = acpicpu_cstate_cst(sc);

	switch (rv) {

	case AE_OK:
		acpicpu_cstate_cst_bios();
		break;

	default:
		sc->sc_flags |= ACPICPU_FLAG_C_FADT;
		acpicpu_cstate_fadt(sc);
		break;
	}

	/*
	 * Query the optional _CSD.
	 */
	rv = acpicpu_cstate_dep(sc);

	if (ACPI_SUCCESS(rv))
		sc->sc_flags |= ACPICPU_FLAG_C_DEP;

	sc->sc_flags |= ACPICPU_FLAG_C;

	acpicpu_cstate_quirks(sc);
}

void
acpicpu_cstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
		return;

	(void)acpicpu_md_cstate_stop();

	sc->sc_flags &= ~ACPICPU_FLAG_C;
}

void
acpicpu_cstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	(void)acpicpu_md_cstate_start(sc);
}

void
acpicpu_cstate_suspend(void *aux)
{
	/* Nothing. */
}

void
acpicpu_cstate_resume(void *aux)
{
	acpicpu_cstate_callback(aux);
}

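/*
 * Re-evaluate _CST. This is done at resume time and, typically in
 * response to a notification from the platform, when the set of
 * available C-states may have changed. When the static FADT values
 * are in use, this is a no-op.
 */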
void
acpicpu_cstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0)
		return;

	mutex_enter(&sc->sc_mtx);
	(void)acpicpu_cstate_cst(sc);
	mutex_exit(&sc->sc_mtx);
}

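/*
 * Evaluate _CST. Per the ACPI specification, the object returns a
 * package of the form
 *
 *	Package {
 *		Count,			// Integer
 *		Package { ... },	// one sub-package per C-state
 *		...
 *	}
 *
 * where each sub-package describes a single C-state; these are
 * parsed in acpicpu_cstate_cst_add() below.
 */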
static ACPI_STATUS
acpicpu_cstate_cst(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i, n;
	uint8_t count;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count < 2) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = obj->Package.Elements;

	if (elm[0].Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm[0].Integer.Value;

	if (n != obj->Package.Count - 1) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	if (n > ACPI_C_STATES_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	acpicpu_cstate_memset(sc);

	/*
	 * All x86 processors should support C1 (a.k.a. HALT).
	 */
	sc->sc_cstate[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;

	CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1);
	CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3);

	for (count = 0, i = 1; i <= n; i++) {

		elm = &obj->Package.Elements[i];
		rv = acpicpu_cstate_cst_add(sc, elm);

		if (ACPI_SUCCESS(rv))
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

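/*
 * Parse one _CST entry. Each entry is a package of exactly four
 * elements (cf. ACPI 4.0, section 8.4.2.1):
 *
 *	Package {
 *		Register,	// Buffer (generic address structure)
 *		Type,		// Integer (1 = C1, 2 = C2, 3 = C3)
 *		Latency,	// Integer (worst case, in microseconds)
 *		Power		// Integer (average, in milliwatts)
 *	}
 */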
static ACPI_STATUS
acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;
	struct acpicpu_cstate state;
	struct acpicpu_reg *reg;
	ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT *obj;
	uint32_t type;

	(void)memset(&state, 0, sizeof(state));

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (elm->Package.Count != 4) {
		rv = AE_LIMIT;
		goto out;
	}

	/*
	 * Type.
	 */
	obj = &elm->Package.Elements[1];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	type = obj->Integer.Value;

	if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) {
		rv = AE_TYPE;
		goto out;
	}

	/*
	 * Latency.
	 */
	obj = &elm->Package.Elements[2];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_latency = obj->Integer.Value;

	/*
	 * Power.
	 */
	obj = &elm->Package.Elements[3];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_power = obj->Integer.Value;

	/*
	 * Register.
	 */
	obj = &elm->Package.Elements[0];

	if (obj->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	CTASSERT(sizeof(struct acpicpu_reg) == 15);

	if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) {
		rv = AE_LIMIT;
		goto out;
	}

	reg = (struct acpicpu_reg *)obj->Buffer.Pointer;

	switch (reg->reg_spaceid) {

	case ACPI_ADR_SPACE_SYSTEM_IO:
		state.cs_method = ACPICPU_C_STATE_SYSIO;

		if (reg->reg_addr == 0) {
			rv = AE_AML_ILLEGAL_ADDRESS;
			goto out;
		}

		if (reg->reg_bitwidth != 8) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		state.cs_addr = reg->reg_addr;
		break;

	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		state.cs_method = ACPICPU_C_STATE_FFH;

		switch (type) {

		case ACPI_STATE_C1:

			/*
			 * If ACPI wants native access (FFH), but the
			 * MD code does not support MONITOR/MWAIT, use
			 * HLT for C1 and error out for higher C-states.
			 */
			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0)
				state.cs_method = ACPICPU_C_STATE_HALT;

			break;

		case ACPI_STATE_C3:
			state.cs_flags = ACPICPU_FLAG_C_BM_STS;

			/* FALLTHROUGH */
		default:

			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}
		}

		if (sc->sc_cap != 0) {

			/*
			 * The _CST FFH GAS encoding may contain
			 * additional hints on Intel processors.
			 * Use these to determine whether we can
			 * avoid the bus master activity check.
			 */
			if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0)
				state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
		}

		break;

	default:
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	cs[type].cs_addr = state.cs_addr;
	cs[type].cs_power = state.cs_power;
	cs[type].cs_flags = state.cs_flags;
	cs[type].cs_method = state.cs_method;
	cs[type].cs_latency = state.cs_latency;

out:
	if (ACPI_FAILURE(rv))
		aprint_error_dev(sc->sc_dev, "failed to add "
		    "C-state: %s\n", AcpiFormatException(rv));

	return rv;
}

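/*
 * The FADT may define a value that is to be written to the SMI
 * command port to inform the firmware that the operating system
 * supports _CST; some systems may not expose all C-states until
 * this handshake has been performed.
 */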
static void
acpicpu_cstate_cst_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.CstControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

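/*
 * Reset the cached C-state table before it is (re)populated.
 */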
static void
acpicpu_cstate_memset(struct acpicpu_softc *sc)
{
	uint8_t i = 0;

	while (i < __arraycount(sc->sc_cstate)) {

		sc->sc_cstate[i].cs_addr = 0;
		sc->sc_cstate[i].cs_power = 0;
		sc->sc_cstate[i].cs_flags = 0;
		sc->sc_cstate[i].cs_method = 0;
		sc->sc_cstate[i].cs_latency = 0;

		i++;
	}
}

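/*
 * Evaluate the optional _CSD (C-state dependency) object. The
 * expected return value is a package containing a single six-element
 * package of integers,
 *
 *	{ NumEntries (6), Revision (0), Domain,
 *	  CoordType, NumProcessors, Index },
 *
 * where CoordType should be 0xFC (SW_ALL), 0xFD (SW_ANY),
 * or 0xFE (HW_ALL).
 */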
static ACPI_STATUS
acpicpu_cstate_dep(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t val;
	uint8_t i, n;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CSD", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 1) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = &obj->Package.Elements[0];

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm->Package.Count;

	if (n != 6) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = elm->Package.Elements;

	for (i = 0; i < n; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER) {
			rv = AE_TYPE;
			goto out;
		}

		if (elm[i].Integer.Value > UINT32_MAX) {
			rv = AE_AML_NUMERIC_OVERFLOW;
			goto out;
		}
	}

	val = elm[1].Integer.Value;

	if (val != 0)
		aprint_debug_dev(sc->sc_dev, "invalid revision in _CSD\n");

	val = elm[3].Integer.Value;

	if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) {
		rv = AE_AML_BAD_RESOURCE_VALUE;
		goto out;
	}

	val = elm[4].Integer.Value;

	if (val > sc->sc_ncpus) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	sc->sc_cstate_dep.dep_domain = elm[2].Integer.Value;
	sc->sc_cstate_dep.dep_type   = elm[3].Integer.Value;
	sc->sc_cstate_dep.dep_ncpus  = elm[4].Integer.Value;
	sc->sc_cstate_dep.dep_index  = elm[5].Integer.Value;

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_debug_dev(sc->sc_dev, "failed to evaluate "
		    "_CSD: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

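/*
 * Fall back to the static values in the FADT. In this mode C2 and
 * C3 are entered by reading the P_LVL2 and P_LVL3 registers, located
 * at offsets 4 and 5 from the processor's P_BLK, respectively.
 */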
static void
acpicpu_cstate_fadt(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;

	acpicpu_cstate_memset(sc);

	/*
	 * All x86 processors should support C1 (a.k.a. HALT).
	 */
	cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;

	if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) == 0)
		aprint_debug_dev(sc->sc_dev, "HALT not supported?\n");

	if (sc->sc_object.ao_pblkaddr == 0)
		return;

	if (sc->sc_ncpus > 1) {

		if ((AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
			return;
	}

	cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
	cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;

	cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency;
	cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency;

	cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4;
	cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5;

	/*
	 * The P_BLK length should always be 6. If it
	 * is not, reduce functionality accordingly.
	 */
	if (sc->sc_object.ao_pblklen < 5)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (sc->sc_object.ao_pblklen < 6)
		cs[ACPI_STATE_C3].cs_method = 0;

	/*
	 * Sanity check the latency levels in the FADT. Values above
	 * the thresholds may be used to indicate that C2 and C3 are
	 * not supported -- AMD family 11h is an example:
	 *
	 *	Advanced Micro Devices: BIOS and Kernel Developer's
	 *	Guide (BKDG) for AMD Family 11h Processors. Section
	 *	2.4.3, Revision 3.00, July, 2008.
	 */
	CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100);
	CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000);

	if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX)
		cs[ACPI_STATE_C3].cs_method = 0;
}

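/*
 * Apply platform-specific quirks. This runs after the C-state table
 * has been filled in, and may disable individual states or adjust
 * the flags that steer the idle loop.
 */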
static void
acpicpu_cstate_quirks(struct acpicpu_softc *sc)
{
	const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
	const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;

	/*
	 * Disable C3 for PIIX4.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_PIIX4) != 0) {
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
		return;
	}

	/*
	 * Check bus master arbitration. If ARB_DIS
	 * is not available, processor caches must be
	 * flushed before C3 (ACPI 4.0, section 8.2).
	 */
	if (reg != 0 && len != 0) {
		sc->sc_flags |= ACPICPU_FLAG_C_ARB;
		return;
	}

	/*
	 * Disable C3 entirely if WBINVD is not present.
	 */
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
	else {
		/*
		 * If WBINVD is present and functioning properly,
		 * flush all processor caches before entering C3;
		 * otherwise the flush is not guaranteed to work
		 * and C3 must be disabled.
		 */
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
			sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
		else
			sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
	}
}

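/*
 * Pick the deepest enabled C-state whose worst-case latency, scaled
 * by cs_factor, is still below the length of the previous sleep.
 * For instance, with the factor of three, a C2 state with a latency
 * of 100 is chosen only if the previous sleep lasted more than 300
 * units of the same measure.
 */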
static int
acpicpu_cstate_latency(struct acpicpu_softc *sc)
{
	static const uint32_t cs_factor = 3;
	struct acpicpu_cstate *cs;
	int i;

	KASSERT(mutex_owned(&sc->sc_mtx) != 0);

	for (i = cs_state_max; i > 0; i--) {

		cs = &sc->sc_cstate[i];

		if (__predict_false(cs->cs_method == 0))
			continue;

		/*
		 * Choose a state if we have previously slept
		 * longer than the worst case latency of the
		 * state times an arbitrary multiplier.
		 */
		if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor)
			return i;
	}

	return ACPI_STATE_C1;
}

/*
 * The main idle loop.
 */
void
acpicpu_cstate_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	int state;

	KASSERT(acpicpu_sc != NULL);
	KASSERT(ci->ci_acpiid < maxcpus);

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL))
		return;

#if defined(__i386__) || defined(__x86_64__)
	KASSERT(ci->ci_ilevel == IPL_NONE);
#elif defined(__aarch64__)
	KASSERT(ci->ci_cpl == IPL_NONE);
#endif
	KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0);

	if (__predict_false(sc->sc_cold != false))
		return;

	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
		return;

	state = acpicpu_cstate_latency(sc);
	mutex_exit(&sc->sc_mtx);

#if defined(__i386__) || defined(__x86_64__)
	/*
	 * Apply AMD C1E quirk.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_C1E) != 0)
		acpicpu_md_quirk_c1e();
#endif

	/*
	 * Check for bus master activity. Note that usb(4) in particular
	 * tends to cause high activity, which may prevent the use of C3
	 * states.
	 */
	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

		if (acpicpu_cstate_bm_check() != false)
			state--;

		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
			state = ACPI_STATE_C1;
	}

	KASSERT(state != ACPI_STATE_C0);

	if (state != ACPI_STATE_C3) {
		acpicpu_cstate_idle_enter(sc, state);
		return;
	}

	/*
	 * On all recent (Intel) CPUs caches are shared
	 * by CPUs and bus master control is required to
	 * keep these coherent while in C3. Flushing the
	 * CPU caches is only the last resort.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
		ACPI_FLUSH_CPU_CACHE();

	/*
	 * Allow the bus master to request that any given
	 * CPU should return immediately to C0 from C3.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

	/*
	 * It may be necessary to disable bus master arbitration
	 * to ensure that bus master cycles do not occur while
	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

	acpicpu_cstate_idle_enter(sc, state);

	/*
	 * Disable bus master wake and re-enable the arbiter.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
}

static void
acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
{
	struct acpicpu_cstate *cs = &sc->sc_cstate[state];
	uint32_t val;

#ifdef notyet
	/*
	 * XXX This has a significant performance impact because the ACPI
	 * timer seems very slow and with many CPUs becomes a chokepoint.
	 * Better to use the TSC (if invariant) or the APIC timer instead.
	 * Probably even getbintime().  Disabled for now as there is no
	 * functional change -- only C1 sleep is enabled.
	 */
	start = acpitimer_read_fast(NULL);
#endif

	switch (cs->cs_method) {

	case ACPICPU_C_STATE_FFH:
	case ACPICPU_C_STATE_HALT:
		acpicpu_md_cstate_enter(cs->cs_method, state);
		break;

	case ACPICPU_C_STATE_SYSIO:
		(void)AcpiOsReadPort(cs->cs_addr, &val, 8);
		break;
	}

	cs->cs_evcnt.ev_count++;

#ifdef notyet
	/*
	 * XXX As above.  Also, hztoms() seems incorrect, as the ACPI
	 * timer is running in the MHz region.
	 */
	end = acpitimer_read_fast(NULL);
	sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000;
#endif
}

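/*
 * Check and acknowledge bus master activity. The BM_STS bit is set
 * by the hardware when a bus master requests the bus; like the other
 * bits in the PM1 status register, it is cleared by writing a 1.
 */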
static bool
acpicpu_cstate_bm_check(void)
{
	uint32_t val = 0;
	ACPI_STATUS rv;

	rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val);

	if (ACPI_FAILURE(rv) || val == 0)
		return false;

	(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);

	return true;
}
    799