/* $NetBSD: acpi_cpu_md.c,v 1.6 2010/08/09 04:18:48 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.6 2010/08/09 04:18:48 jruoho Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kcore.h>
#include <sys/sysctl.h>
#include <sys/xcall.h>

#include <x86/cpu.h>
#include <x86/cpufunc.h>
#include <x86/cputypes.h>
#include <x86/cpuvar.h>
#include <x86/cpu_msr.h>
#include <x86/machdep.h>

#include <dev/acpi/acpica.h>
#include <dev/acpi/acpi_cpu.h>

static char	  native_idle_text[16];
void		(*native_idle)(void) = NULL;

static int	 acpicpu_md_pstate_sysctl_get(SYSCTLFN_PROTO);
static int	 acpicpu_md_pstate_sysctl_set(SYSCTLFN_PROTO);
static int	 acpicpu_md_pstate_sysctl_all(SYSCTLFN_PROTO);

extern uint32_t cpus_running;
extern struct acpicpu_softc **acpicpu_sc;

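/*
 * Gather the capability bits that the MI acpi_cpu driver passes to
 * the firmware through the ACPI _PDC ("Processor Driver
 * Capabilities") method. "FFH" stands for Functional Fixed
 * Hardware, which on x86 means MONITOR/MWAIT for C-states and the
 * EST MSR interface for P-states.
 */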
uint32_t
acpicpu_md_cap(void)
{
	struct cpu_info *ci = curcpu();
	uint32_t val = 0;

	if (cpu_vendor != CPUVENDOR_INTEL)
		return val;

	/*
	 * Basic SMP C-states (required for _CST).
	 */
	val |= ACPICPU_PDC_C_C1PT | ACPICPU_PDC_C_C2C3;

	/*
	 * If MONITOR/MWAIT is available, announce
	 * support for native instructions in all C-states.
	 */
	if ((ci->ci_feat_val[1] & CPUID2_MONITOR) != 0)
		val |= ACPICPU_PDC_C_C1_FFH | ACPICPU_PDC_C_C2C3_FFH;

	/*
	 * Set native P-states if EST is available.
	 */
	if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
		val |= ACPICPU_PDC_P_FFH;

	return val;
}

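/*
 * Collect machine-dependent quirk flags for the MI driver. Judging
 * by their names and the checks below, ACPICPU_FLAG_C_BM marks bus
 * master control as available and ACPICPU_FLAG_C_ARB marks bus
 * master arbitration as required when entering deep C-states.
 */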
uint32_t
acpicpu_md_quirks(void)
{
	struct cpu_info *ci = curcpu();
	uint32_t val = 0;

	if (acpicpu_md_cpus_running() == 1)
		val |= ACPICPU_FLAG_C_BM;

	if ((ci->ci_feat_val[1] & CPUID2_MONITOR) != 0)
		val |= ACPICPU_FLAG_C_FFH;

	switch (cpu_vendor) {

	case CPUVENDOR_INTEL:

		val |= ACPICPU_FLAG_C_BM | ACPICPU_FLAG_C_ARB;

		if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
			val |= ACPICPU_FLAG_P_FFH;

		/*
		 * Bus master arbitration is not
		 * needed on some recent Intel CPUs.
		 */
		if (CPUID2FAMILY(ci->ci_signature) > 15)
			val &= ~ACPICPU_FLAG_C_ARB;

		if (CPUID2FAMILY(ci->ci_signature) == 6 &&
		    CPUID2MODEL(ci->ci_signature) >= 15)
			val &= ~ACPICPU_FLAG_C_ARB;

		break;

	case CPUVENDOR_AMD:

		/*
		 * XXX: Deal with the AMD C1E extension here.
		 */
		break;
	}

	return val;
}

uint32_t
acpicpu_md_cpus_running(void)
{

	return popcount32(cpus_running);
}

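/*
 * Idle-loop handling: acpicpu_md_idle_init() saves the native x86
 * idle routine, acpicpu_md_idle_start() installs the ACPI C-state
 * idle loop in its place, and acpicpu_md_idle_stop() restores the
 * saved routine before the driver detaches.
 */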
int
acpicpu_md_idle_init(void)
{
	const size_t size = sizeof(native_idle_text);

	x86_disable_intr();
	x86_cpu_idle_get(&native_idle, native_idle_text, size);
	x86_enable_intr();

	return 0;
}

int
acpicpu_md_idle_start(void)
{

	x86_disable_intr();
	x86_cpu_idle_set(acpicpu_cstate_idle, "acpi");
	x86_enable_intr();

	return 0;
}

int
acpicpu_md_idle_stop(void)
{
	uint64_t xc;

	x86_disable_intr();
	x86_cpu_idle_set(native_idle, native_idle_text);
	x86_enable_intr();

	/*
	 * Run a cross-call to ensure that all CPUs are
	 * out from the ACPI idle-loop before detachment.
	 */
	xc = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
	xc_wait(xc);

	return 0;
}

/*
 * The MD idle loop. Called with interrupts disabled.
 */
void
acpicpu_md_idle_enter(int method, int state)
{
	struct cpu_info *ci = curcpu();

	switch (method) {

	case ACPICPU_C_STATE_FFH:

		x86_enable_intr();
		x86_monitor(&ci->ci_want_resched, 0, 0);

		if (__predict_false(ci->ci_want_resched != 0))
			return;

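		/*
		 * The MWAIT hint encodes the target C-state in the
		 * bits [7:4] (C1 is 0x00, C2 is 0x10, C3 is 0x20),
		 * while the bits [3:0] select a sub-state.
		 */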
		x86_mwait((state - 1) << 4, 0);
		break;

	case ACPICPU_C_STATE_HALT:

		if (__predict_false(ci->ci_want_resched != 0)) {
			x86_enable_intr();
			return;
		}

		x86_stihlt();
		break;
	}
}

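/*
 * The cpu_freq_sysctl_* hooks are picked up by the x86 machdep
 * sysctl code; installing them makes this driver the provider of
 * the CPU frequency sysctl nodes.
 */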
int
acpicpu_md_pstate_start(void)
{

	cpu_freq_sysctl_get = acpicpu_md_pstate_sysctl_get;
	cpu_freq_sysctl_set = acpicpu_md_pstate_sysctl_set;
	cpu_freq_sysctl_all = acpicpu_md_pstate_sysctl_all;

	return 0;
}

int
acpicpu_md_pstate_stop(void)
{

	cpu_freq_sysctl_get = NULL;
	cpu_freq_sysctl_set = NULL;
	cpu_freq_sysctl_all = NULL;

	return 0;
}

static int
acpicpu_md_pstate_sysctl_get(SYSCTLFN_ARGS)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	struct sysctlnode node;
	uint32_t freq;
	int err;

	/*
	 * We can use any ACPI CPU to manipulate the
	 * frequencies. In MP environments all CPUs
	 * are mandated to support the same number of
	 * P-states and each state must have identical
	 * parameters across CPUs.
	 */
	sc = acpicpu_sc[ci->ci_acpiid];

	if (sc == NULL)
		return ENXIO;

	err = acpicpu_pstate_get(sc, &freq);

	if (err != 0)
		return err;

	node = *rnode;
	node.sysctl_data = &freq;

	err = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (err != 0 || newp == NULL)
		return err;

	return 0;
}

static int
acpicpu_md_pstate_sysctl_set(SYSCTLFN_ARGS)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	struct sysctlnode node;
	uint32_t freq;
	int err;

	sc = acpicpu_sc[ci->ci_acpiid];

	if (sc == NULL)
		return ENXIO;

	err = acpicpu_pstate_get(sc, &freq);

	if (err != 0)
		return err;

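	/*
	 * sysctl_lookup() copies the current frequency out to the
	 * user and, for a write request, copies the new value into
	 * 'freq'. If newp is NULL, the request was a read and there
	 * is nothing more to do.
	 */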
	node = *rnode;
	node.sysctl_data = &freq;

	err = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (err != 0 || newp == NULL)
		return err;

	err = acpicpu_pstate_set(sc, freq);

	if (err != 0)
		return err;

	return 0;
}

    308 
    309 static int
    310 acpicpu_md_pstate_sysctl_all(SYSCTLFN_ARGS)
    311 {
    312 	struct cpu_info *ci = curcpu();
    313 	struct acpicpu_softc *sc;
    314 	struct sysctlnode node;
    315 	char buf[1024];
    316 	size_t len;
    317 	uint32_t i;
    318 	int err;
    319 
    320 	sc = acpicpu_sc[ci->ci_acpiid];
    321 
    322 	if (sc == NULL)
    323 		return ENXIO;
    324 
    325 	(void)memset(&buf, 0, sizeof(buf));
    326 
    327 	mutex_enter(&sc->sc_mtx);
    328 
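	/*
	 * Export the supported frequencies as a single space-
	 * separated string, e.g. "2400 2000 1600"; following _PSS,
	 * the values are in MHz. The loop begins at sc_pstate_max
	 * to skip states that are currently unavailable and ignores
	 * entries with a zero frequency.
	 */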
	for (len = 0, i = sc->sc_pstate_max; i < sc->sc_pstate_count; i++) {

		if (sc->sc_pstate[i].ps_freq == 0)
			continue;

		len += snprintf(buf + len, sizeof(buf) - len, "%u%s",
		    sc->sc_pstate[i].ps_freq,
		    i < (sc->sc_pstate_count - 1) ? " " : "");
	}

	mutex_exit(&sc->sc_mtx);

	node = *rnode;
	node.sysctl_data = buf;

	err = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (err != 0 || newp == NULL)
		return err;

	return 0;
}

    351 
    352 int
    353 acpicpu_md_pstate_get(struct acpicpu_softc *sc, uint32_t *freq)
    354 {
    355 	struct acpicpu_pstate *ps;
    356 	uint64_t val;
    357 	uint32_t i;
    358 
    359 	switch (cpu_vendor) {
    360 
    361 	case CPUVENDOR_INTEL:
    362 
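		/*
		 * The current P-state status is reported in the low
		 * 16 bits of IA32_PERF_STATUS (MSR 0x198); it is
		 * matched below against the status values from _PSS.
		 */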
		val = rdmsr(MSR_PERF_STATUS);
		val = val & 0xffff;

		mutex_enter(&sc->sc_mtx);

		for (i = sc->sc_pstate_max; i < sc->sc_pstate_count; i++) {

			ps = &sc->sc_pstate[i];

			if (ps->ps_freq == 0)
				continue;

			if (val == ps->ps_status) {
				mutex_exit(&sc->sc_mtx);
				*freq = ps->ps_freq;
				return 0;
			}
		}

		mutex_exit(&sc->sc_mtx);

		return EIO;

	default:
		return ENODEV;
	}

	return 0;
}

    392 
    393 int
    394 acpicpu_md_pstate_set(struct acpicpu_pstate *ps)
    395 {
    396 	struct msr_rw_info msr;
    397 	uint64_t xc, val;
    398 	int i;
    399 
    400 	switch (cpu_vendor) {
    401 
    402 	case CPUVENDOR_INTEL:
    403 		msr.msr_read  = true;
    404 		msr.msr_type  = MSR_PERF_CTL;
    405 		msr.msr_value = ps->ps_control;
    406 		msr.msr_mask  = 0xffffULL;
    407 		break;
    408 
    409 	default:
    410 		return ENODEV;
    411 	}
    412 
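	/*
	 * Broadcast the update to all CPUs. With msr_read set,
	 * x86_msr_xcall() should perform a read-modify-write,
	 * replacing only the bits selected by msr_mask (here the
	 * low 16 bits of IA32_PERF_CTL) and preserving the rest.
	 */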
	xc = xc_broadcast(0, (xcfunc_t)x86_msr_xcall, &msr, NULL);
	xc_wait(xc);

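	/*
	 * Poll until the transition is visible in IA32_PERF_STATUS,
	 * pausing between the attempts for the transition latency,
	 * which _PSS reports in microseconds.
	 */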
	for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

		val = rdmsr(MSR_PERF_STATUS);
		val = val & 0xffff;

		if (val == ps->ps_status)
			return 0;

		DELAY(ps->ps_latency);
	}

	return EAGAIN;
}