/* $NetBSD: acpi_cpu_cstate.c,v 1.50.2.2 2011/03/05 20:53:02 rmind Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.50.2.2 2011/03/05 20:53:02 rmind Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/timetc.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>
#include <dev/acpi/acpi_timer.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_cstate")

static ACPI_STATUS	acpicpu_cstate_cst(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cstate_cst_add(struct acpicpu_softc *,
					       ACPI_OBJECT *, int);
static void		acpicpu_cstate_cst_bios(void);
static void		acpicpu_cstate_memset(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cstate_dep(struct acpicpu_softc *);
static void		acpicpu_cstate_fadt(struct acpicpu_softc *);
static void		acpicpu_cstate_quirks(struct acpicpu_softc *);
static int		acpicpu_cstate_latency(struct acpicpu_softc *);
static bool		acpicpu_cstate_bm_check(void);
static void		acpicpu_cstate_idle_enter(struct acpicpu_softc *, int);

extern struct acpicpu_softc **acpicpu_sc;

/*
 * XXX:	The local APIC timer (as well as TSC) is typically stopped in C3.
 *	For now, we cannot but disable C3. But there appears to be timer-
 *	related interrupt issues also in C2. The only entirely safe option
 *	at the moment is to use C1.
 */
#ifdef ACPICPU_ENABLE_C3
static int cs_state_max = ACPI_STATE_C3;
#else
static int cs_state_max = ACPI_STATE_C1;
#endif

void
acpicpu_cstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	ACPI_STATUS rv;

	/*
	 * Either use the preferred _CST or resort to FADT.
	 */
	rv = acpicpu_cstate_cst(sc);

	switch (rv) {

	case AE_OK:
		acpicpu_cstate_cst_bios();
		break;

	default:
		sc->sc_flags |= ACPICPU_FLAG_C_FADT;
		acpicpu_cstate_fadt(sc);
		break;
	}

	/*
	 * Query the optional _CSD.
	 */
	rv = acpicpu_cstate_dep(sc);

	if (ACPI_SUCCESS(rv))
		sc->sc_flags |= ACPICPU_FLAG_C_DEP;

	sc->sc_flags |= ACPICPU_FLAG_C;

	acpicpu_cstate_quirks(sc);
}

int
acpicpu_cstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	int rv;

	rv = RUN_ONCE(&once_detach, acpicpu_md_cstate_stop);

	if (rv != 0)
		return rv;

	sc->sc_flags &= ~ACPICPU_FLAG_C;

	return 0;
}

void
acpicpu_cstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	(void)acpicpu_md_cstate_start(sc);
}

bool
acpicpu_cstate_suspend(device_t self)
{
	return true;
}

bool
acpicpu_cstate_resume(device_t self)
{
	static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_cstate_callback;
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) == 0)
		(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);

	return true;
}

void
acpicpu_cstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0)
		return;

	mutex_enter(&sc->sc_mtx);
	(void)acpicpu_cstate_cst(sc);
	mutex_exit(&sc->sc_mtx);
}

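/*
 * Evaluate and parse the _CST package. As parsed below, _CST is
 * expected to return a package whose first element is an integer
 * count, followed by one sub-package per C-state; each sub-package
 * holds a register buffer, the state type, the worst-case latency,
 * and the average power consumption.
 */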
static ACPI_STATUS
acpicpu_cstate_cst(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i, n;
	uint8_t count;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count < 2) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = obj->Package.Elements;

	if (elm[0].Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm[0].Integer.Value;

	if (n != obj->Package.Count - 1) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	if (n > ACPI_C_STATES_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	acpicpu_cstate_memset(sc);

	CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1);
	CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3);

	for (count = 0, i = 1; i <= n; i++) {

		elm = &obj->Package.Elements[i];
		rv = acpicpu_cstate_cst_add(sc, elm, i);

		if (ACPI_SUCCESS(rv))
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

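/*
 * Add a single C-state from a _CST sub-package. The sub-package is
 * expected to contain exactly four elements: a generic address
 * structure (buffer) describing how the state is entered, the type
 * (C1 - C3), the worst-case latency, and the average power. The state
 * is rejected if the register encoding is unsupported.
 */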
static ACPI_STATUS
acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm, int i)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;
	struct acpicpu_cstate state;
	struct acpicpu_reg *reg;
	ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT *obj;
	uint32_t type;

	(void)memset(&state, 0, sizeof(*cs));

	state.cs_flags = ACPICPU_FLAG_C_BM_STS;

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (elm->Package.Count != 4) {
		rv = AE_LIMIT;
		goto out;
	}

	/*
	 * Type.
	 */
	obj = &elm->Package.Elements[1];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	type = obj->Integer.Value;

	if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) {
		rv = AE_TYPE;
		goto out;
	}

	/*
	 * Latency.
	 */
	obj = &elm->Package.Elements[2];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_latency = obj->Integer.Value;

	/*
	 * Power.
	 */
	obj = &elm->Package.Elements[3];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_power = obj->Integer.Value;

	/*
	 * Register.
	 */
	obj = &elm->Package.Elements[0];

	if (obj->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	CTASSERT(sizeof(struct acpicpu_reg) == 15);

	if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) {
		rv = AE_LIMIT;
		goto out;
	}

	reg = (struct acpicpu_reg *)obj->Buffer.Pointer;

	switch (reg->reg_spaceid) {

	case ACPI_ADR_SPACE_SYSTEM_IO:
		state.cs_method = ACPICPU_C_STATE_SYSIO;

		if (reg->reg_addr == 0) {
			rv = AE_AML_ILLEGAL_ADDRESS;
			goto out;
		}

		if (reg->reg_bitwidth != 8) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		state.cs_addr = reg->reg_addr;
		break;

	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		state.cs_method = ACPICPU_C_STATE_FFH;

		switch (type) {

		case ACPI_STATE_C1:

			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0)
				state.cs_method = ACPICPU_C_STATE_HALT;

			break;

		default:

			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}
		}

		if (sc->sc_cap != 0) {

			/*
			 * The _CST FFH GAS encoding may contain
			 * additional hints on Intel processors.
			 * Use these to determine whether we can
			 * avoid the bus master activity check.
			 */
			if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0)
				state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
		}

		break;

	default:
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	/*
	 * As some systems define the type arbitrarily,
	 * we use a sequential counter instead of the
	 * BIOS data. For instance, AMD family 14h is
	 * instructed to only use the value 2; see
	 *
	 *	Advanced Micro Devices: BIOS and Kernel
	 *	Developer's Guide (BKDG) for AMD Family
	 *	14h Models 00h-0Fh Processors. Revision
	 *	3.00, January 4, 2011.
	 */
	if (i != (int)type) {

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "C%d != C%u from BIOS", i, type));
	}

	KASSERT(cs[i].cs_method == 0);

	cs[i].cs_addr = state.cs_addr;
	cs[i].cs_power = state.cs_power;
	cs[i].cs_flags = state.cs_flags;
	cs[i].cs_method = state.cs_method;
	cs[i].cs_latency = state.cs_latency;

out:
	if (ACPI_FAILURE(rv))
		aprint_error_dev(sc->sc_dev, "failed to add "
		    "C-state: %s\n", AcpiFormatException(rv));

	return rv;
}

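/*
 * Write the FADT "CST control" value to the SMI command port. This is
 * intended to indicate to the firmware that the operating system
 * supports _CST and the C-states-changed notification; nothing is
 * done if either field is zero.
 */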
static void
acpicpu_cstate_cst_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.CstControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

static void
acpicpu_cstate_memset(struct acpicpu_softc *sc)
{
	uint8_t i = 0;

	while (i < __arraycount(sc->sc_cstate)) {

		sc->sc_cstate[i].cs_addr = 0;
		sc->sc_cstate[i].cs_power = 0;
		sc->sc_cstate[i].cs_flags = 0;
		sc->sc_cstate[i].cs_method = 0;
		sc->sc_cstate[i].cs_latency = 0;

		i++;
	}
}

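/*
 * Evaluate the optional _CSD (C-state dependency) object. As parsed
 * below, the outer package is expected to contain a single sub-package
 * of six integers: the number of entries, the revision, the dependency
 * domain, the coordination type, the number of processors in the
 * domain, and the C-state index.
 */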
static ACPI_STATUS
acpicpu_cstate_dep(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t val;
	uint8_t i, n;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CSD", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 1) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = &obj->Package.Elements[0];

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm->Package.Count;

	if (n != 6) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = elm->Package.Elements;

	for (i = 0; i < n; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER) {
			rv = AE_TYPE;
			goto out;
		}

		if (elm[i].Integer.Value > UINT32_MAX) {
			rv = AE_AML_NUMERIC_OVERFLOW;
			goto out;
		}
	}

	val = elm[1].Integer.Value;

	if (val != 0)
		aprint_debug_dev(sc->sc_dev, "invalid revision in _CSD\n");

	val = elm[3].Integer.Value;

	if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) {
		rv = AE_AML_BAD_RESOURCE_VALUE;
		goto out;
	}

	val = elm[4].Integer.Value;

	if (val > sc->sc_ncpus) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	sc->sc_cstate_dep.dep_domain = elm[2].Integer.Value;
	sc->sc_cstate_dep.dep_type = elm[3].Integer.Value;
	sc->sc_cstate_dep.dep_ncpus = elm[4].Integer.Value;
	sc->sc_cstate_dep.dep_index = elm[5].Integer.Value;

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_debug_dev(sc->sc_dev, "failed to evaluate "
		    "_CSD: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

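/*
 * Fall back to the FADT when _CST is unavailable: C1 is always assumed
 * to be available via HLT, whereas the I/O addresses for C2 and C3 are
 * derived from the processor's P_BLK (the P_LVL2 and P_LVL3 registers
 * at offsets 4 and 5) and the latencies are taken from the FADT.
 */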
static void
acpicpu_cstate_fadt(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;

	acpicpu_cstate_memset(sc);

	/*
	 * All x86 processors should support C1 (a.k.a. HALT).
	 */
	cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;

	if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) == 0)
		aprint_debug_dev(sc->sc_dev, "HALT not supported?\n");

	if (sc->sc_object.ao_pblkaddr == 0)
		return;

	if (sc->sc_ncpus > 1) {

		if ((AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
			return;
	}

	cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
	cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;

	cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency;
	cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency;

	cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4;
	cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5;

	/*
	 * The P_BLK length should always be 6. If it
	 * is not, reduce functionality accordingly.
	 */
	if (sc->sc_object.ao_pblklen < 5)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (sc->sc_object.ao_pblklen < 6)
		cs[ACPI_STATE_C3].cs_method = 0;

	/*
	 * Sanity check the latency levels in FADT.
	 * Values above the thresholds are used to
	 * inform that C-states are not supported.
	 */
	CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100);
	CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000);

	if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX)
		cs[ACPI_STATE_C3].cs_method = 0;
}

static void
acpicpu_cstate_quirks(struct acpicpu_softc *sc)
{
	const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
	const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;

	/*
	 * Disable C3 for PIIX4.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_PIIX4) != 0) {
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
		return;
	}

	/*
	 * Check bus master arbitration. If ARB_DIS
	 * is not available, processor caches must be
	 * flushed before C3 (ACPI 4.0, section 8.2).
	 */
	if (reg != 0 && len != 0) {
		sc->sc_flags |= ACPICPU_FLAG_C_ARB;
		return;
	}

	/*
	 * Disable C3 entirely if WBINVD is not present.
	 */
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
	else {
		/*
		 * If WBINVD is present and functioning properly,
		 * flush all processor caches before entering C3.
		 * Otherwise disable C3 entirely.
		 */
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
			sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
		else
			sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
	}
}

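/*
 * Pick a C-state for the next idle period: scan from the deepest
 * enabled state downwards and select the first state whose worst-case
 * latency, scaled by a small factor, is below the length of the
 * previous sleep. C1 is used when nothing deeper qualifies.
 */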
static int
acpicpu_cstate_latency(struct acpicpu_softc *sc)
{
	static const uint32_t cs_factor = 3;
	struct acpicpu_cstate *cs;
	int i;

	for (i = cs_state_max; i > 0; i--) {

		cs = &sc->sc_cstate[i];

		if (__predict_false(cs->cs_method == 0))
			continue;

		/*
		 * Choose a state if we have previously slept
		 * longer than the worst case latency of the
		 * state times an arbitrary multiplier.
		 */
		if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor)
			return i;
	}

	return ACPI_STATE_C1;
}

/*
 * The main idle loop.
 */
void
acpicpu_cstate_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	int state;

	acpi_md_OsDisableInterrupt();

	if (__predict_false(ci->ci_want_resched != 0))
		goto out;

	KASSERT(acpicpu_sc != NULL);
	KASSERT(ci->ci_acpiid < maxcpus);

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL))
		goto out;

	KASSERT(ci->ci_ilevel == IPL_NONE);
	KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0);

	if (__predict_false(sc->sc_cold != false))
		goto out;

	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
		goto out;

	mutex_exit(&sc->sc_mtx);
	state = acpicpu_cstate_latency(sc);

	/*
	 * Apply AMD C1E quirk.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_C1E) != 0)
		acpicpu_md_quirk_c1e();

	/*
	 * Check for bus master activity. Note that particularly usb(4)
	 * causes high activity, which may prevent the use of C3 states.
	 */
	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

		if (acpicpu_cstate_bm_check() != false)
			state--;

		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
			state = ACPI_STATE_C1;
	}

	KASSERT(state != ACPI_STATE_C0);

	if (state != ACPI_STATE_C3) {
		acpicpu_cstate_idle_enter(sc, state);
		return;
	}

	/*
	 * On all recent (Intel) CPUs caches are shared
	 * by CPUs and bus master control is required to
	 * keep these coherent while in C3. Flushing the
	 * CPU caches is only the last resort.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
		ACPI_FLUSH_CPU_CACHE();

	/*
	 * Allow the bus master to request that any given
	 * CPU should return immediately to C0 from C3.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

	/*
	 * It may be necessary to disable bus master arbitration
	 * to ensure that bus master cycles do not occur while
	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

	acpicpu_cstate_idle_enter(sc, state);

	/*
	 * Disable bus master wake and re-enable the arbiter.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

	return;

out:
	acpi_md_OsEnableInterrupt();
}

static void
acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
{
	struct acpicpu_cstate *cs = &sc->sc_cstate[state];
	uint32_t end, start, val;

	start = acpitimer_read_fast(NULL);

	switch (cs->cs_method) {

	case ACPICPU_C_STATE_FFH:
	case ACPICPU_C_STATE_HALT:
		acpicpu_md_cstate_enter(cs->cs_method, state);
		break;

	case ACPICPU_C_STATE_SYSIO:
		(void)AcpiOsReadPort(cs->cs_addr, &val, 8);
		break;
	}

	acpi_md_OsEnableInterrupt();

	cs->cs_evcnt.ev_count++;
	end = acpitimer_read_fast(NULL);
	sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000;
}

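/*
 * Check and acknowledge bus master activity by reading and then
 * clearing the BM_STS bit. Returns true if bus master activity was
 * detected since the previous check.
 */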
static bool
acpicpu_cstate_bm_check(void)
{
	uint32_t val = 0;
	ACPI_STATUS rv;

	rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val);

	if (ACPI_FAILURE(rv) || val == 0)
		return false;

	(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);

	return true;
}