/* $NetBSD: acpi_cpu_md.c,v 1.14 2010/08/18 16:08:50 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.14 2010/08/18 16:08:50 jruoho Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kcore.h>
#include <sys/sysctl.h>
#include <sys/xcall.h>

#include <x86/cpu.h>
#include <x86/cpufunc.h>
#include <x86/cputypes.h>
#include <x86/cpuvar.h>
#include <x86/cpu_msr.h>
#include <x86/machdep.h>

#include <dev/acpi/acpica.h>
#include <dev/acpi/acpi_cpu.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

static char     native_idle_text[16];
void          (*native_idle)(void) = NULL;
void          (*native_cpu_freq_init)(int) = NULL;

static int      acpicpu_md_quirks_piix4(struct pci_attach_args *);
static int      acpicpu_md_pstate_sysctl_get(SYSCTLFN_PROTO);
static int      acpicpu_md_pstate_sysctl_set(SYSCTLFN_PROTO);
static int      acpicpu_md_pstate_sysctl_all(SYSCTLFN_PROTO);
static void     acpicpu_md_pstate_status(void *, void *);
static void     acpicpu_md_tstate_status(void *, void *);

extern uint32_t cpus_running;
extern struct acpicpu_softc **acpicpu_sc;

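/*
 * Return the capability bits (ACPICPU_PDC_*) that are reported to the
 * firmware: basic SMP C-states, plus native (FFH) C-, P-, and T-state
 * support when MONITOR/MWAIT, EST, and the CPUID ACPI feature flag,
 * respectively, are present. Intel only.
 */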
uint32_t
acpicpu_md_cap(void)
{
        struct cpu_info *ci = curcpu();
        uint32_t val = 0;

        if (cpu_vendor != CPUVENDOR_INTEL)
                return val;

        /*
         * Basic SMP C-states (required for _CST).
         */
        val |= ACPICPU_PDC_C_C1PT | ACPICPU_PDC_C_C2C3;

        /*
         * If MONITOR/MWAIT is available, announce
         * support for native instructions in all C-states.
         */
        if ((ci->ci_feat_val[1] & CPUID2_MONITOR) != 0)
                val |= ACPICPU_PDC_C_C1_FFH | ACPICPU_PDC_C_C2C3_FFH;

        /*
         * Set native P- and T-states, if available.
         */
        if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
                val |= ACPICPU_PDC_P_FFH;

        if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0)
                val |= ACPICPU_PDC_T_FFH;

        return val;
}

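/*
 * Return machine-dependent quirk flags for the current CPU: whether
 * bus master control and arbitration are needed, whether native
 * (MSR-based) C-, P-, and T-states can be used, and whether a PIIX4
 * with its known errata is present.
 */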
uint32_t
acpicpu_md_quirks(void)
{
        struct cpu_info *ci = curcpu();
        struct pci_attach_args pa;
        uint32_t val = 0;

        if (acpicpu_md_cpus_running() == 1)
                val |= ACPICPU_FLAG_C_BM;

        if ((ci->ci_feat_val[1] & CPUID2_MONITOR) != 0)
                val |= ACPICPU_FLAG_C_FFH;

        switch (cpu_vendor) {

        case CPUVENDOR_INTEL:

                if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
                        val |= ACPICPU_FLAG_P_FFH;

                if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0)
                        val |= ACPICPU_FLAG_T_FFH;

                val |= ACPICPU_FLAG_C_BM | ACPICPU_FLAG_C_ARB;

                /*
                 * Bus master arbitration is not
                 * needed on some recent Intel CPUs.
                 */
                if (CPUID2FAMILY(ci->ci_signature) > 15)
                        val &= ~ACPICPU_FLAG_C_ARB;

                if (CPUID2FAMILY(ci->ci_signature) == 6 &&
                    CPUID2MODEL(ci->ci_signature) >= 15)
                        val &= ~ACPICPU_FLAG_C_ARB;

                break;

        case CPUVENDOR_AMD:

                /*
                 * XXX: Deal with (non-XPSS) PowerNow! and C1E.
                 */
                break;
        }

        /*
         * There are several errata for the PIIX4.
         */
        if (pci_find_device(&pa, acpicpu_md_quirks_piix4) != 0)
                val |= ACPICPU_FLAG_PIIX4;

        return val;
}

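/*
 * Match PIIX4-compatible devices: the Intel 82371AB ISA bridge and
 * the 82440MX power management controller.
 */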
static int
acpicpu_md_quirks_piix4(struct pci_attach_args *pa)
{

        /*
         * XXX: The pci_find_device(9) function only
         *      deals with attached devices. Change this
         *      to use something like pci_device_foreach().
         */
        if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
                return 0;

        if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82371AB_ISA ||
            PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82440MX_PMC)
                return 1;

        return 0;
}

uint32_t
acpicpu_md_cpus_running(void)
{

        return popcount32(cpus_running);
}

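/*
 * Install acpicpu_cstate_idle() as the machine-dependent idle loop,
 * saving the native idle routine so that it can be restored on detach.
 */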
int
acpicpu_md_idle_start(void)
{
        const size_t size = sizeof(native_idle_text);

        x86_disable_intr();
        x86_cpu_idle_get(&native_idle, native_idle_text, size);
        x86_cpu_idle_set(acpicpu_cstate_idle, "acpi");
        x86_enable_intr();

        return 0;
}

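/*
 * Restore the native idle loop and make sure that no CPU is still
 * executing the ACPI idle routine.
 */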
int
acpicpu_md_idle_stop(void)
{
        uint64_t xc;

        x86_disable_intr();
        x86_cpu_idle_set(native_idle, native_idle_text);
        x86_enable_intr();

        /*
         * Run a cross-call to ensure that all CPUs are
         * out of the ACPI idle loop before detachment.
         */
        xc = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
        xc_wait(xc);

        return 0;
}

/*
 * The MD idle loop. Called with interrupts disabled.
 */
void
acpicpu_md_idle_enter(int method, int state)
{
        struct cpu_info *ci = curcpu();

        switch (method) {

        case ACPICPU_C_STATE_FFH:

                x86_enable_intr();
                x86_monitor(&ci->ci_want_resched, 0, 0);

                if (__predict_false(ci->ci_want_resched != 0))
                        return;

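                /*
                 * The MWAIT hint takes the target C-state minus one
                 * in bits 7:4; the sub-state bits 3:0 are left zero.
                 */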
                x86_mwait((state - 1) << 4, 0);
                break;

        case ACPICPU_C_STATE_HALT:

                if (__predict_false(ci->ci_want_resched != 0)) {
                        x86_enable_intr();
                        return;
                }

                x86_stihlt();
                break;
        }
}

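/*
 * Install the ACPI P-state backend: take over the native cpufreq
 * initialization hook and create the familiar machdep.est.* (Intel)
 * or machdep.powernow.* (AMD) frequency sysctl nodes, backed by the
 * handlers below.
 */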
int
acpicpu_md_pstate_start(void)
{
        const struct sysctlnode *fnode, *mnode, *rnode;
        const char *str;
        int rv;

        switch (cpu_vendor) {

        case CPUVENDOR_INTEL:
                str = "est";
                break;

        case CPUVENDOR_AMD:
                str = "powernow";
                break;

        default:
                return ENODEV;
        }

        /*
         * A kludge for backwards compatibility.
         */
        native_cpu_freq_init = cpu_freq_init;

        if (cpu_freq_sysctllog != NULL) {
                sysctl_teardown(&cpu_freq_sysctllog);
                cpu_freq_sysctllog = NULL;
        }

        rv = sysctl_createv(&cpu_freq_sysctllog, 0, NULL, &rnode,
            CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
            NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);

        if (rv != 0)
                goto fail;

        rv = sysctl_createv(&cpu_freq_sysctllog, 0, &rnode, &mnode,
            0, CTLTYPE_NODE, str, NULL,
            NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

        if (rv != 0)
                goto fail;

        rv = sysctl_createv(&cpu_freq_sysctllog, 0, &mnode, &fnode,
            0, CTLTYPE_NODE, "frequency", NULL,
            NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

        if (rv != 0)
                goto fail;

        rv = sysctl_createv(&cpu_freq_sysctllog, 0, &fnode, &rnode,
            CTLFLAG_READWRITE, CTLTYPE_INT, "target", NULL,
            acpicpu_md_pstate_sysctl_set, 0, NULL, 0, CTL_CREATE, CTL_EOL);

        if (rv != 0)
                goto fail;

        rv = sysctl_createv(&cpu_freq_sysctllog, 0, &fnode, &rnode,
            CTLFLAG_READONLY, CTLTYPE_INT, "current", NULL,
            acpicpu_md_pstate_sysctl_get, 0, NULL, 0, CTL_CREATE, CTL_EOL);

        if (rv != 0)
                goto fail;

        rv = sysctl_createv(&cpu_freq_sysctllog, 0, &fnode, &rnode,
            CTLFLAG_READONLY, CTLTYPE_STRING, "available", NULL,
            acpicpu_md_pstate_sysctl_all, 0, NULL, 0, CTL_CREATE, CTL_EOL);

        if (rv != 0)
                goto fail;

        return 0;

fail:
        if (cpu_freq_sysctllog != NULL) {
                sysctl_teardown(&cpu_freq_sysctllog);
                cpu_freq_sysctllog = NULL;
        }

        if (native_cpu_freq_init != NULL)
                (*native_cpu_freq_init)(cpu_vendor);

        return rv;
}

int
acpicpu_md_pstate_stop(void)
{

        if (cpu_freq_sysctllog != NULL) {
                sysctl_teardown(&cpu_freq_sysctllog);
                cpu_freq_sysctllog = NULL;
        }

        if (native_cpu_freq_init != NULL)
                (*native_cpu_freq_init)(cpu_vendor);

        return 0;
}

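/*
 * Sysctl handler for the "current" node: report the frequency of the
 * present P-state, as resolved via acpicpu_pstate_get().
 */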
static int
acpicpu_md_pstate_sysctl_get(SYSCTLFN_ARGS)
{
        struct cpu_info *ci = curcpu();
        struct acpicpu_softc *sc;
        struct sysctlnode node;
        uint32_t freq;
        int err;

        sc = acpicpu_sc[ci->ci_acpiid];

        if (sc == NULL)
                return ENXIO;

        err = acpicpu_pstate_get(sc, &freq);

        if (err != 0)
                return err;

        node = *rnode;
        node.sysctl_data = &freq;

        err = sysctl_lookup(SYSCTLFN_CALL(&node));

        if (err != 0 || newp == NULL)
                return err;

        return 0;
}

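/*
 * Sysctl handler for the "target" node: set a new target frequency
 * via acpicpu_pstate_set().
 */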
static int
acpicpu_md_pstate_sysctl_set(SYSCTLFN_ARGS)
{
        struct cpu_info *ci = curcpu();
        struct acpicpu_softc *sc;
        struct sysctlnode node;
        uint32_t freq;
        int err;

        sc = acpicpu_sc[ci->ci_acpiid];

        if (sc == NULL)
                return ENXIO;

        err = acpicpu_pstate_get(sc, &freq);

        if (err != 0)
                return err;

        node = *rnode;
        node.sysctl_data = &freq;

        err = sysctl_lookup(SYSCTLFN_CALL(&node));

        if (err != 0 || newp == NULL)
                return err;

        err = acpicpu_pstate_set(sc, freq);

        if (err != 0)
                return err;

        return 0;
}

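/*
 * Sysctl handler for the "available" node: return a space-separated
 * list of the usable P-state frequencies.
 */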
static int
acpicpu_md_pstate_sysctl_all(SYSCTLFN_ARGS)
{
        struct cpu_info *ci = curcpu();
        struct acpicpu_softc *sc;
        struct sysctlnode node;
        char buf[1024];
        size_t len;
        uint32_t i;
        int err;

        sc = acpicpu_sc[ci->ci_acpiid];

        if (sc == NULL)
                return ENXIO;

        (void)memset(&buf, 0, sizeof(buf));

        mutex_enter(&sc->sc_mtx);

        for (len = 0, i = sc->sc_pstate_max; i < sc->sc_pstate_count; i++) {

                if (sc->sc_pstate[i].ps_freq == 0)
                        continue;

                len += snprintf(buf + len, sizeof(buf) - len, "%u%s",
                    sc->sc_pstate[i].ps_freq,
                    i < (sc->sc_pstate_count - 1) ? " " : "");
        }

        mutex_exit(&sc->sc_mtx);

        node = *rnode;
        node.sysctl_data = buf;

        err = sysctl_lookup(SYSCTLFN_CALL(&node));

        if (err != 0 || newp == NULL)
                return err;

        return 0;
}

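/*
 * Read the P-state status MSR and translate the value into a
 * frequency by matching it against the known P-states. On Intel the
 * MSR is always MSR_PERF_STATUS; on AMD the per-state status address
 * filled in from the XPSS data is used.
 */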
int
acpicpu_md_pstate_get(struct acpicpu_softc *sc, uint32_t *freq)
{
        struct acpicpu_pstate *ps = NULL;
        uint64_t val;
        uint32_t i;

        for (i = 0; i < sc->sc_pstate_count; i++) {

                ps = &sc->sc_pstate[i];

                if (ps->ps_freq != 0)
                        break;
        }

        if (__predict_false(ps == NULL))
                return EINVAL;

        switch (cpu_vendor) {

        case CPUVENDOR_INTEL:
                ps->ps_status_addr = MSR_PERF_STATUS;
                ps->ps_status_mask = __BITS(0, 15);
                break;

        case CPUVENDOR_AMD:

                if ((ps->ps_flags & ACPICPU_FLAG_P_XPSS) == 0)
                        return EOPNOTSUPP;

                break;

        default:
                return ENODEV;
        }

        if (ps->ps_status_addr == 0)
                return EINVAL;

        val = rdmsr(ps->ps_status_addr);

        if (ps->ps_status_mask != 0)
                val = val & ps->ps_status_mask;

        for (i = 0; i < sc->sc_pstate_count; i++) {

                ps = &sc->sc_pstate[i];

                if (ps->ps_freq == 0)
                        continue;

                if (val == ps->ps_status) {
                        *freq = ps->ps_freq;
                        return 0;
                }
        }

        return EIO;
}

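/*
 * Set a P-state by writing the control value to the per-state control
 * MSR on all CPUs via a cross-call, and then verify the transition
 * from the status MSR when one is available.
 */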
int
acpicpu_md_pstate_set(struct acpicpu_pstate *ps)
{
        struct msr_rw_info msr;
        uint64_t xc;
        int rv = 0;

        switch (cpu_vendor) {

        case CPUVENDOR_INTEL:
                ps->ps_control_addr = MSR_PERF_CTL;
                ps->ps_control_mask = __BITS(0, 15);

                ps->ps_status_addr = MSR_PERF_STATUS;
                ps->ps_status_mask = __BITS(0, 15);
                break;

        case CPUVENDOR_AMD:

                if ((ps->ps_flags & ACPICPU_FLAG_P_XPSS) == 0)
                        return EOPNOTSUPP;

                break;

        default:
                return ENODEV;
        }

        msr.msr_read = false;
        msr.msr_type = ps->ps_control_addr;
        msr.msr_value = ps->ps_control;

        if (ps->ps_control_mask != 0) {
                msr.msr_mask = ps->ps_control_mask;
                msr.msr_read = true;
        }

        xc = xc_broadcast(0, (xcfunc_t)x86_msr_xcall, &msr, NULL);
        xc_wait(xc);

        if (ps->ps_status_addr == 0)
                return 0;

        xc = xc_broadcast(0, (xcfunc_t)acpicpu_md_pstate_status, ps, &rv);
        xc_wait(xc);

        return rv;
}

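/*
 * Cross-call handler: poll the status MSR until the requested P-state
 * is visible, or flag EAGAIN after ACPICPU_P_STATE_RETRY attempts.
 */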
static void
acpicpu_md_pstate_status(void *arg1, void *arg2)
{
        struct acpicpu_pstate *ps = arg1;
        uint64_t val;
        int i;

        for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

                val = rdmsr(ps->ps_status_addr);

                if (ps->ps_status_mask != 0)
                        val = val & ps->ps_status_mask;

                if (val == ps->ps_status)
                        return;

                DELAY(ps->ps_latency);
        }

        *(int *)arg2 = EAGAIN;
}

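/*
 * Read MSR_THERM_CONTROL and translate the on-demand clock modulation
 * setting into a T-state throttling percentage.
 */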
int
acpicpu_md_tstate_get(struct acpicpu_softc *sc, uint32_t *percent)
{
        struct acpicpu_tstate *ts;
        uint64_t val;
        uint32_t i;

        if (cpu_vendor != CPUVENDOR_INTEL)
                return ENODEV;

        val = rdmsr(MSR_THERM_CONTROL);

        for (i = 0; i < sc->sc_tstate_count; i++) {

                ts = &sc->sc_tstate[i];

                if (ts->ts_percent == 0)
                        continue;

                if (val == ts->ts_control || val == ts->ts_status) {
                        *percent = ts->ts_percent;
                        return 0;
                }
        }

        return EIO;
}

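/*
 * Set a T-state by programming the clock modulation field (bits 1 to
 * 4 of MSR_THERM_CONTROL) on all CPUs, and verify the result if a
 * status value was supplied.
 */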
int
acpicpu_md_tstate_set(struct acpicpu_tstate *ts)
{
        struct msr_rw_info msr;
        uint64_t xc;
        int rv = 0;

        if (cpu_vendor != CPUVENDOR_INTEL)
                return ENODEV;

        msr.msr_read = true;
        msr.msr_type = MSR_THERM_CONTROL;
        msr.msr_value = ts->ts_control;
        msr.msr_mask = __BITS(1, 4);

        xc = xc_broadcast(0, (xcfunc_t)x86_msr_xcall, &msr, NULL);
        xc_wait(xc);

        if (ts->ts_status == 0)
                return 0;

        xc = xc_broadcast(0, (xcfunc_t)acpicpu_md_tstate_status, ts, &rv);
        xc_wait(xc);

        return rv;
}

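/*
 * Cross-call handler: poll MSR_THERM_CONTROL until the requested
 * T-state takes effect, or flag EAGAIN after ACPICPU_T_STATE_RETRY
 * attempts.
 */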
static void
acpicpu_md_tstate_status(void *arg1, void *arg2)
{
        struct acpicpu_tstate *ts = arg1;
        uint64_t val;
        int i;

        for (i = val = 0; i < ACPICPU_T_STATE_RETRY; i++) {

                val = rdmsr(MSR_THERM_CONTROL);

                if (val == ts->ts_status)
                        return;

                DELAY(ts->ts_latency);
        }

        *(int *)arg2 = EAGAIN;
}