/*	$NetBSD: cpu_subr.c,v 1.29 2007/05/17 14:51:26 yamt Exp $	*/

/*-
 * Copyright (c) 2001 Matt Thomas.
 * Copyright (c) 2001 Tsubai Masanari.
 * Copyright (c) 1998, 1999, 2001 Internet Research Institute, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by
 *	Internet Research Institute, Inc.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.29 2007/05/17 14:51:26 yamt Exp $");

#include "opt_ppcparam.h"
#include "opt_multiprocessor.h"
#include "opt_altivec.h"
#include "sysmon_envsys.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <powerpc/oea/hid.h>
#include <powerpc/oea/hid_601.h>
#include <powerpc/spr.h>

#include <dev/sysmon/sysmonvar.h>

static void cpu_enable_l2cr(register_t);
static void cpu_enable_l3cr(register_t);
static void cpu_config_l2cr(int);
static void cpu_config_l3cr(int);
static void cpu_probe_speed(struct cpu_info *);
static void cpu_idlespin(void);
#if NSYSMON_ENVSYS > 0
static void cpu_tau_setup(struct cpu_info *);
static int cpu_tau_gtredata(struct sysmon_envsys *,
    struct envsys_tre_data *);
static int cpu_tau_streinfo(struct sysmon_envsys *,
    struct envsys_basic_info *);
#endif

int cpu;
int ncpus;

struct fmttab {
	register_t fmt_mask;
	register_t fmt_value;
	const char *fmt_string;
};

static const struct fmttab cpu_7450_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 256KB L2 cache" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_7448_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 1MB L2 cache" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_7457_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2E, ~0, " 512KB L2 cache" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_7450_l3cr_formats[] = {
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3DO, " data-only" },
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3IO, " instruction-only" },
	{ L3CR_L3DO|L3CR_L3IO, L3CR_L3DO|L3CR_L3IO, " locked" },
	{ L3CR_L3SIZ, L3SIZ_2M, " 2MB" },
	{ L3CR_L3SIZ, L3SIZ_1M, " 1MB" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3PE|L3CR_L3APE, " parity" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3PE, " data-parity" },
	{ L3CR_L3PE|L3CR_L3APE, L3CR_L3APE, " address-parity" },
	{ L3CR_L3PE|L3CR_L3APE, 0, " no-parity" },
	{ L3CR_L3SIZ, ~0, " L3 cache" },
	{ L3CR_L3RT, L3RT_MSUG2_DDR, " (DDR SRAM)" },
	{ L3CR_L3RT, L3RT_PIPELINE_LATE, " (LW SRAM)" },
	{ L3CR_L3RT, L3RT_PB2_SRAM, " (PB2 SRAM)" },
	{ L3CR_L3CLK, ~0, " at" },
	{ L3CR_L3CLK, L3CLK_20, " 2:1" },
	{ L3CR_L3CLK, L3CLK_25, " 2.5:1" },
	{ L3CR_L3CLK, L3CLK_30, " 3:1" },
	{ L3CR_L3CLK, L3CLK_35, " 3.5:1" },
	{ L3CR_L3CLK, L3CLK_40, " 4:1" },
	{ L3CR_L3CLK, L3CLK_50, " 5:1" },
	{ L3CR_L3CLK, L3CLK_60, " 6:1" },
	{ L3CR_L3CLK, ~0, " ratio" },
	{ 0, 0, NULL },
};

static const struct fmttab cpu_ibm750_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ 0, ~0, " 512KB" },
	{ L2CR_L2WT, L2CR_L2WT, " WT" },
	{ L2CR_L2WT, 0, " WB" },
	{ L2CR_L2PE, L2CR_L2PE, " with ECC" },
	{ 0, ~0, " L2 cache" },
	{ 0, 0, NULL }
};

static const struct fmttab cpu_l2cr_formats[] = {
	{ L2CR_L2E, 0, " disabled" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO, " data-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2IO, " instruction-only" },
	{ L2CR_L2DO|L2CR_L2IO, L2CR_L2DO|L2CR_L2IO, " locked" },
	{ L2CR_L2PE, L2CR_L2PE, " parity" },
	{ L2CR_L2PE, 0, " no-parity" },
	{ L2CR_L2SIZ, L2SIZ_2M, " 2MB" },
	{ L2CR_L2SIZ, L2SIZ_1M, " 1MB" },
	{ L2CR_L2SIZ, L2SIZ_512K, " 512KB" },
	{ L2CR_L2SIZ, L2SIZ_256K, " 256KB" },
	{ L2CR_L2WT, L2CR_L2WT, " WT" },
	{ L2CR_L2WT, 0, " WB" },
	{ L2CR_L2E, ~0, " L2 cache" },
	{ L2CR_L2RAM, L2RAM_FLOWTHRU_BURST, " (FB SRAM)" },
	{ L2CR_L2RAM, L2RAM_PIPELINE_LATE, " (LW SRAM)" },
	{ L2CR_L2RAM, L2RAM_PIPELINE_BURST, " (PB SRAM)" },
	{ L2CR_L2CLK, ~0, " at" },
	{ L2CR_L2CLK, L2CLK_10, " 1:1" },
	{ L2CR_L2CLK, L2CLK_15, " 1.5:1" },
	{ L2CR_L2CLK, L2CLK_20, " 2:1" },
	{ L2CR_L2CLK, L2CLK_25, " 2.5:1" },
	{ L2CR_L2CLK, L2CLK_30, " 3:1" },
	{ L2CR_L2CLK, L2CLK_35, " 3.5:1" },
	{ L2CR_L2CLK, L2CLK_40, " 4:1" },
	{ L2CR_L2CLK, ~0, " ratio" },
	{ 0, 0, NULL }
};

static void cpu_fmttab_print(const struct fmttab *, register_t);

struct cputab {
	const char name[8];
	uint16_t version;
	uint16_t revfmt;
};
#define	REVFMT_MAJMIN	1		/* %u.%u */
#define	REVFMT_HEX	2		/* 0x%04x */
#define	REVFMT_DEC	3		/* %u */
static const struct cputab models[] = {
	{ "601", MPC601, REVFMT_DEC },
	{ "602", MPC602, REVFMT_DEC },
	{ "603", MPC603, REVFMT_MAJMIN },
	{ "603e", MPC603e, REVFMT_MAJMIN },
	{ "603ev", MPC603ev, REVFMT_MAJMIN },
	{ "604", MPC604, REVFMT_MAJMIN },
	{ "604e", MPC604e, REVFMT_MAJMIN },
	{ "604ev", MPC604ev, REVFMT_MAJMIN },
	{ "620", MPC620, REVFMT_HEX },
	{ "750", MPC750, REVFMT_MAJMIN },
	{ "750FX", IBM750FX, REVFMT_MAJMIN },
	{ "7400", MPC7400, REVFMT_MAJMIN },
	{ "7410", MPC7410, REVFMT_MAJMIN },
	{ "7450", MPC7450, REVFMT_MAJMIN },
	{ "7455", MPC7455, REVFMT_MAJMIN },
	{ "7457", MPC7457, REVFMT_MAJMIN },
	{ "7447A", MPC7447A, REVFMT_MAJMIN },
	{ "7448", MPC7448, REVFMT_MAJMIN },
	{ "8240", MPC8240, REVFMT_MAJMIN },
	{ "970", IBM970, REVFMT_MAJMIN },
	{ "970FX", IBM970FX, REVFMT_MAJMIN },
	{ "", 0, REVFMT_HEX }
};


#ifdef MULTIPROCESSOR
struct cpu_info cpu_info[CPU_MAXNUM];
#else
struct cpu_info cpu_info[1];
#endif

int cpu_altivec;
int cpu_psluserset, cpu_pslusermod;
char cpu_model[80];

void
cpu_fmttab_print(const struct fmttab *fmt, register_t data)
{
	for (; fmt->fmt_mask != 0 || fmt->fmt_value != 0; fmt++) {
		if ((~fmt->fmt_mask & fmt->fmt_value) != 0 ||
		    (data & fmt->fmt_mask) == fmt->fmt_value)
			aprint_normal("%s", fmt->fmt_string);
	}
}
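
/*
 * The tables above are walked in order by cpu_fmttab_print(): an entry
 * prints its string when the register bits selected by fmt_mask equal
 * fmt_value, and an entry whose fmt_value has bits outside fmt_mask (the
 * "~0" entries) prints unconditionally.  As an illustration (register
 * contents assumed, not taken from real hardware): an enabled, write-back
 * L2CR with parity disabled, a 256KB size field (L2SIZ_256K), pipelined
 * burst RAM (L2RAM_PIPELINE_BURST) and a 2:1 clock field (L2CLK_20) would
 * make cpu_fmttab_print(cpu_l2cr_formats, l2cr) emit
 * " no-parity 256KB WB L2 cache (PB SRAM) at 2:1 ratio".
 */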

void
cpu_idlespin(void)
{
	register_t msr;

	if (powersave <= 0)
		return;

	__asm volatile(
		"sync;"
		"mfmsr %0;"
		"oris %0,%0,%1@h;"	/* enter power saving mode */
		"mtmsr %0;"
		"isync;"
		: "=r"(msr)
		: "J"(PSL_POW));
}
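
/*
 * Note: setting MSR[POW] above only requests power saving.  On the 603,
 * 750 and 74xx families the state actually entered (DOZE, NAP or SLEEP)
 * is whichever mode bit cpu_setup() left set in HID0; the 604 family
 * honours MSR[POW] without any HID0 assistance.  The core wakes again
 * when the next enabled exception (e.g. decrementer or external
 * interrupt) is taken, so the idle loop simply re-executes this sequence.
 */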

void
cpu_probe_cache(void)
{
	u_int assoc, pvr, vers;

	pvr = mfpvr();
	vers = pvr >> 16;


	/* Presently common across almost all implementations. */
	curcpu()->ci_ci.dcache_line_size = CACHELINESIZE;
	curcpu()->ci_ci.icache_line_size = CACHELINESIZE;


	switch (vers) {
#define	K	*1024
	case IBM750FX:
	case MPC601:
	case MPC750:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		curcpu()->ci_ci.dcache_size = 32 K;
		curcpu()->ci_ci.icache_size = 32 K;
		assoc = 8;
		break;
	case MPC603:
		curcpu()->ci_ci.dcache_size = 8 K;
		curcpu()->ci_ci.icache_size = 8 K;
		assoc = 2;
		break;
	case MPC603e:
	case MPC603ev:
	case MPC604:
	case MPC8240:
	case MPC8245:
		curcpu()->ci_ci.dcache_size = 16 K;
		curcpu()->ci_ci.icache_size = 16 K;
		assoc = 4;
		break;
	case MPC604e:
	case MPC604ev:
		curcpu()->ci_ci.dcache_size = 32 K;
		curcpu()->ci_ci.icache_size = 32 K;
		assoc = 4;
		break;
	case IBM970:
	case IBM970FX:
		curcpu()->ci_ci.dcache_size = 32 K;
		curcpu()->ci_ci.icache_size = 64 K;
		curcpu()->ci_ci.dcache_line_size = 128;
		curcpu()->ci_ci.icache_line_size = 128;
		assoc = 2;
		break;

	default:
		curcpu()->ci_ci.dcache_size = PAGE_SIZE;
		curcpu()->ci_ci.icache_size = PAGE_SIZE;
		assoc = 1;
#undef K
	}

	/*
	 * Possibly recolor.
	 */
	uvm_page_recolor(atop(curcpu()->ci_ci.dcache_size / assoc));
}
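
/*
 * The argument passed to uvm_page_recolor() above is the number of pages
 * spanned by one cache way.  For example (illustrative): a 32KB, 8-way
 * set-associative D-cache with 4KB pages gives atop(32768 / 8) = 1, i.e.
 * no additional page coloring, while a hypothetical 512KB direct-mapped
 * cache would give atop(524288) = 128 colors.
 */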

struct cpu_info *
cpu_attach_common(struct device *self, int id)
{
	struct cpu_info *ci;
	u_int pvr, vers;

	ci = &cpu_info[id];
#ifndef MULTIPROCESSOR
	/*
	 * If this isn't the primary CPU, print an error message
	 * and just bail out.
	 */
	if (id != 0) {
		aprint_normal(": ID %d\n", id);
		aprint_normal("%s: processor off-line; multiprocessor support "
		    "not present in kernel\n", self->dv_xname);
		return (NULL);
	}
#endif

	ci->ci_cpuid = id;
	ci->ci_intrdepth = -1;
	ci->ci_dev = self;
	ci->ci_idlespin = cpu_idlespin;

	pvr = mfpvr();
	vers = (pvr >> 16) & 0xffff;

	switch (id) {
	case 0:
		/* load my cpu_number to PIR */
		switch (vers) {
		case MPC601:
		case MPC604:
		case MPC604e:
		case MPC604ev:
		case MPC7400:
		case MPC7410:
		case MPC7447A:
		case MPC7448:
		case MPC7450:
		case MPC7455:
		case MPC7457:
			mtspr(SPR_PIR, id);
		}
		cpu_setup(self, ci);
		break;
	default:
		if (id >= CPU_MAXNUM) {
			aprint_normal(": more than %d cpus?\n", CPU_MAXNUM);
			panic("cpuattach");
		}
#ifndef MULTIPROCESSOR
		aprint_normal(" not configured\n");
		return NULL;
#else
		mi_cpu_attach(ci);
		break;
#endif
	}
	return (ci);
}
void
cpu_setup(struct device *self, struct cpu_info *ci)
{
	u_int hid0, pvr, vers;
	const char *bitmask;
	char hidbuf[128];
	char model[80];

	pvr = mfpvr();
	vers = (pvr >> 16) & 0xffff;

	cpu_identify(model, sizeof(model));
	aprint_normal(": %s, ID %d%s\n", model, cpu_number(),
	    cpu_number() == 0 ? " (primary)" : "");

#if defined (PPC_OEA) || defined (PPC_OEA64)
	hid0 = mfspr(SPR_HID0);
#elif defined (PPC_OEA64_BRIDGE)
	hid0 = mfspr(SPR_HID0);
#endif

	cpu_probe_cache();

	/*
	 * Configure power-saving mode.
	 */
	switch (vers) {
	case MPC604:
	case MPC604e:
	case MPC604ev:
		/*
		 * Do not have HID0 support settings, but can support
		 * MSR[POW] off
		 */
		powersave = 1;
		break;

	case MPC603:
	case MPC603e:
	case MPC603ev:
	case MPC750:
	case IBM750FX:
	case MPC7400:
	case MPC7410:
	case MPC8240:
	case MPC8245:
		/* Select DOZE mode. */
		hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP);
		hid0 |= HID0_DOZE | HID0_DPM;
		powersave = 1;
		break;

	case MPC7447A:
	case MPC7448:
	case MPC7457:
	case MPC7455:
	case MPC7450:
		/* Enable the 7450 branch caches */
		hid0 |= HID0_SGE | HID0_BTIC;
		hid0 |= HID0_LRSTK | HID0_FOLD | HID0_BHT;
		/* Disable BTIC on 7450 Rev 2.0 or earlier */
		if (vers == MPC7450 && (pvr & 0xFFFF) <= 0x0200)
			hid0 &= ~HID0_BTIC;
		/* Select NAP mode. */
		hid0 &= ~(HID0_HIGH_BAT_EN | HID0_SLEEP);
		hid0 |= HID0_NAP | HID0_DPM /* | HID0_XBSEN */;
		powersave = 1;
		break;

	case IBM970:
	case IBM970FX:
	default:
		/* No power-saving mode is available. */ ;
	}

#ifdef NAPMODE
	switch (vers) {
	case IBM750FX:
	case MPC750:
	case MPC7400:
		/* Select NAP mode. */
		hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP);
		hid0 |= HID0_NAP;
		break;
	}
#endif

	switch (vers) {
	case IBM750FX:
	case MPC750:
		hid0 &= ~HID0_DBP;		/* XXX correct? */
		hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT;
		break;

	case MPC7400:
	case MPC7410:
		hid0 &= ~HID0_SPD;
		hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT;
		hid0 |= HID0_EIEC;
		break;
	}

#if defined (PPC_OEA)
	mtspr(SPR_HID0, hid0);
	__asm volatile("sync;isync");
#endif

	switch (vers) {
	case MPC601:
		bitmask = HID0_601_BITMASK;
		break;
	case MPC7450:
	case MPC7455:
	case MPC7457:
		bitmask = HID0_7450_BITMASK;
		break;
	case IBM970:
	case IBM970FX:
		bitmask = 0;
		break;
	default:
		bitmask = HID0_BITMASK;
		break;
	}
	bitmask_snprintf(hid0, bitmask, hidbuf, sizeof hidbuf);
	aprint_normal("%s: HID0 %s, powersave: %d\n", self->dv_xname, hidbuf, powersave);

	ci->ci_khz = 0;

	/*
	 * Display speed and cache configuration.
	 */
	switch (vers) {
	case MPC604:
	case MPC604e:
	case MPC604ev:
	case MPC750:
	case IBM750FX:
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		aprint_normal("%s: ", self->dv_xname);
		cpu_probe_speed(ci);
		aprint_normal("%u.%02u MHz",
		    ci->ci_khz / 1000, (ci->ci_khz / 10) % 100);

		if (vers == IBM750FX || vers == MPC750 ||
		    vers == MPC7400 || vers == MPC7410 || MPC745X_P(vers)) {
			if (MPC745X_P(vers)) {
				cpu_config_l3cr(vers);
			} else {
				cpu_config_l2cr(pvr);
			}
		}
		aprint_normal("\n");
		break;
	}

#if NSYSMON_ENVSYS > 0
	/*
	 * Attach MPC750 temperature sensor to the envsys subsystem.
	 * XXX the 74xx series also has this sensor, but it is not
	 * XXX supported by Motorola and may return values that are off by
	 * XXX 35-55 degrees C.
	 */
	if (vers == MPC750 || vers == IBM750FX)
		cpu_tau_setup(ci);
#endif

	evcnt_attach_dynamic(&ci->ci_ev_clock, EVCNT_TYPE_INTR,
	    NULL, self->dv_xname, "clock");
	evcnt_attach_dynamic(&ci->ci_ev_softclock, EVCNT_TYPE_INTR,
	    NULL, self->dv_xname, "soft clock");
	evcnt_attach_dynamic(&ci->ci_ev_softnet, EVCNT_TYPE_INTR,
	    NULL, self->dv_xname, "soft net");
	evcnt_attach_dynamic(&ci->ci_ev_softserial, EVCNT_TYPE_INTR,
	    NULL, self->dv_xname, "soft serial");
	evcnt_attach_dynamic(&ci->ci_ev_traps, EVCNT_TYPE_TRAP,
	    NULL, self->dv_xname, "traps");
	evcnt_attach_dynamic(&ci->ci_ev_kdsi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, self->dv_xname, "kernel DSI traps");
	evcnt_attach_dynamic(&ci->ci_ev_udsi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, self->dv_xname, "user DSI traps");
	evcnt_attach_dynamic(&ci->ci_ev_udsi_fatal, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_udsi, self->dv_xname, "user DSI failures");
	evcnt_attach_dynamic(&ci->ci_ev_kisi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, self->dv_xname, "kernel ISI traps");
	evcnt_attach_dynamic(&ci->ci_ev_isi, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, self->dv_xname, "user ISI traps");
	evcnt_attach_dynamic(&ci->ci_ev_isi_fatal, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_isi, self->dv_xname, "user ISI failures");
	evcnt_attach_dynamic(&ci->ci_ev_scalls, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, self->dv_xname, "system call traps");
	evcnt_attach_dynamic(&ci->ci_ev_pgm, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, self->dv_xname, "PGM traps");
	evcnt_attach_dynamic(&ci->ci_ev_fpu, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, self->dv_xname, "FPU unavailable traps");
	evcnt_attach_dynamic(&ci->ci_ev_fpusw, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_fpu, self->dv_xname, "FPU context switches");
	evcnt_attach_dynamic(&ci->ci_ev_ali, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, self->dv_xname, "user alignment traps");
	evcnt_attach_dynamic(&ci->ci_ev_ali_fatal, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_ali, self->dv_xname, "user alignment failures");
	evcnt_attach_dynamic(&ci->ci_ev_umchk, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_umchk, self->dv_xname, "user MCHK failures");
	evcnt_attach_dynamic(&ci->ci_ev_vec, EVCNT_TYPE_TRAP,
	    &ci->ci_ev_traps, self->dv_xname, "AltiVec unavailable");
#ifdef ALTIVEC
	if (cpu_altivec) {
		evcnt_attach_dynamic(&ci->ci_ev_vecsw, EVCNT_TYPE_TRAP,
		    &ci->ci_ev_vec, self->dv_xname, "AltiVec context switches");
	}
#endif
}

void
cpu_identify(char *str, size_t len)
{
	u_int pvr, major, minor;
	uint16_t vers, rev, revfmt;
	const struct cputab *cp;
	const char *name;
	size_t n;

	pvr = mfpvr();
	vers = pvr >> 16;
	rev = pvr;

	switch (vers) {
	case MPC7410:
		minor = (pvr >> 0) & 0xff;
		major = minor <= 4 ? 1 : 2;
		break;
	default:
		major = (pvr >> 8) & 0xf;
		minor = (pvr >> 0) & 0xf;
	}

	for (cp = models; cp->name[0] != '\0'; cp++) {
		if (cp->version == vers)
			break;
	}

	if (str == NULL) {
		str = cpu_model;
		len = sizeof(cpu_model);
		cpu = vers;
	}

	revfmt = cp->revfmt;
	name = cp->name;
	if (rev == MPC750 && pvr == 15) {
		name = "755";
		revfmt = REVFMT_HEX;
	}

	if (name[0] != '\0') {
		n = snprintf(str, len, "%s (Revision ", name);
	} else {
		n = snprintf(str, len, "Version %#x (Revision ", vers);
	}
	if (len > n) {
		switch (revfmt) {
		case REVFMT_MAJMIN:
			snprintf(str + n, len - n, "%u.%u)", major, minor);
			break;
		case REVFMT_HEX:
			snprintf(str + n, len - n, "0x%04x)", rev);
			break;
		case REVFMT_DEC:
			snprintf(str + n, len - n, "%u)", rev);
			break;
		}
	}
}

#ifdef L2CR_CONFIG
u_int l2cr_config = L2CR_CONFIG;
#else
u_int l2cr_config = 0;
#endif

#ifdef L3CR_CONFIG
u_int l3cr_config = L3CR_CONFIG;
#else
u_int l3cr_config = 0;
#endif
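
/*
 * L2CR_CONFIG/L3CR_CONFIG are expected to come from the kernel config file
 * via opt_ppcparam.h, e.g. (hypothetical value):
 *
 *	options L2CR_CONFIG="0x80000000"
 *
 * They are only used below when the firmware left the corresponding cache
 * disabled; if a cache is already enabled, the value found in L2CR/L3CR is
 * recorded and reused for the other CPUs instead.
 */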

void
cpu_enable_l2cr(register_t l2cr)
{
	register_t msr, x;

	/* Disable interrupts and set the cache config bits. */
	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
#ifdef ALTIVEC
	if (cpu_altivec)
		__asm volatile("dssall");
#endif
	__asm volatile("sync");
	mtspr(SPR_L2CR, l2cr & ~L2CR_L2E);
	__asm volatile("sync");

	/* Wait for L2 clock to be stable (640 L2 clocks). */
	delay(100);

	/* Invalidate all L2 contents. */
	mtspr(SPR_L2CR, l2cr | L2CR_L2I);
	do {
		x = mfspr(SPR_L2CR);
	} while (x & L2CR_L2IP);

	/* Enable L2 cache. */
	l2cr |= L2CR_L2E;
	mtspr(SPR_L2CR, l2cr);
	mtmsr(msr);
}

void
cpu_enable_l3cr(register_t l3cr)
{
	register_t x;

	/* By The Book (numbered steps from section 3.7.1.3 of MPC7450UM) */

	/*
	 * 1: Set all L3CR bits for final config except L3E, L3I, L3PE, and
	 *    L3CLKEN. (also mask off reserved bits in case they were included
	 *    in L3CR_CONFIG)
	 */
	l3cr &= ~(L3CR_L3E|L3CR_L3I|L3CR_L3PE|L3CR_L3CLKEN|L3CR_RESERVED);
	mtspr(SPR_L3CR, l3cr);

	/* 2: Set L3CR[5] (otherwise reserved bit) to 1 */
	l3cr |= 0x04000000;
	mtspr(SPR_L3CR, l3cr);

	/* 3: Set L3CLKEN to 1 */
	l3cr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, l3cr);

	/* 4/5: Perform a global cache invalidate (ref section 3.7.3.6) */
	__asm volatile("dssall;sync");
	/* L3 cache is already disabled, no need to clear L3E */
	mtspr(SPR_L3CR, l3cr|L3CR_L3I);
	do {
		x = mfspr(SPR_L3CR);
	} while (x & L3CR_L3I);

	/* 6: Clear L3CLKEN to 0 */
	l3cr &= ~L3CR_L3CLKEN;
	mtspr(SPR_L3CR, l3cr);

	/* 7: Perform a 'sync' and wait at least 100 CPU cycles */
	__asm volatile("sync");
	delay(100);

	/* 8: Set L3E and L3CLKEN */
	l3cr |= (L3CR_L3E|L3CR_L3CLKEN);
	mtspr(SPR_L3CR, l3cr);

	/* 9: Perform a 'sync' and wait at least 100 CPU cycles */
	__asm volatile("sync");
	delay(100);
}

void
cpu_config_l2cr(int pvr)
{
	register_t l2cr;

	l2cr = mfspr(SPR_L2CR);

	/*
	 * For MP systems, the firmware may only configure the L2 cache
	 * on the first CPU. In this case, assume that the other CPUs
	 * should use the same value for L2CR.
	 */
	if ((l2cr & L2CR_L2E) != 0 && l2cr_config == 0) {
		l2cr_config = l2cr;
	}

	/*
	 * Configure L2 cache if not enabled.
	 */
	if ((l2cr & L2CR_L2E) == 0 && l2cr_config != 0) {
		cpu_enable_l2cr(l2cr_config);
		l2cr = mfspr(SPR_L2CR);
	}

	if ((l2cr & L2CR_L2E) == 0) {
		aprint_normal(" L2 cache present but not enabled ");
		return;
	}

	aprint_normal(",");
	if ((pvr >> 16) == IBM750FX ||
	    (pvr & 0xffffff00) == 0x00082200 /* IBM750CX */ ||
	    (pvr & 0xffffef00) == 0x00082300 /* IBM750CXe */) {
		cpu_fmttab_print(cpu_ibm750_l2cr_formats, l2cr);
	} else {
		cpu_fmttab_print(cpu_l2cr_formats, l2cr);
	}
}

void
cpu_config_l3cr(int vers)
{
	register_t l2cr;
	register_t l3cr;

	l2cr = mfspr(SPR_L2CR);

	/*
	 * For MP systems, the firmware may only configure the L2 cache
	 * on the first CPU. In this case, assume that the other CPUs
	 * should use the same value for L2CR.
	 */
	if ((l2cr & L2CR_L2E) != 0 && l2cr_config == 0) {
		l2cr_config = l2cr;
	}

	/*
	 * Configure L2 cache if not enabled.
	 */
	if ((l2cr & L2CR_L2E) == 0 && l2cr_config != 0) {
		cpu_enable_l2cr(l2cr_config);
		l2cr = mfspr(SPR_L2CR);
	}

	aprint_normal(",");
	switch (vers) {
	case MPC7447A:
	case MPC7457:
		cpu_fmttab_print(cpu_7457_l2cr_formats, l2cr);
		return;
	case MPC7448:
		cpu_fmttab_print(cpu_7448_l2cr_formats, l2cr);
		return;
	default:
		cpu_fmttab_print(cpu_7450_l2cr_formats, l2cr);
		break;
	}

	l3cr = mfspr(SPR_L3CR);

	/*
	 * For MP systems, the firmware may only configure the L3 cache
	 * on the first CPU. In this case, assume that the other CPUs
	 * should use the same value for L3CR.
	 */
	if ((l3cr & L3CR_L3E) != 0 && l3cr_config == 0) {
		l3cr_config = l3cr;
	}

	/*
	 * Configure L3 cache if not enabled.
	 */
	if ((l3cr & L3CR_L3E) == 0 && l3cr_config != 0) {
		cpu_enable_l3cr(l3cr_config);
		l3cr = mfspr(SPR_L3CR);
	}

	if (l3cr & L3CR_L3E) {
		aprint_normal(",");
		cpu_fmttab_print(cpu_7450_l3cr_formats, l3cr);
	}
}

void
cpu_probe_speed(struct cpu_info *ci)
{
	uint64_t cps;

	mtspr(SPR_MMCR0, MMCR0_FC);
	mtspr(SPR_PMC1, 0);
	mtspr(SPR_MMCR0, MMCR0_PMC1SEL(PMCN_CYCLES));
	delay(100000);
	cps = (mfspr(SPR_PMC1) * 10) + 4999;

	mtspr(SPR_MMCR0, MMCR0_FC);

	ci->ci_khz = cps / 1000;
}
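
/*
 * In cpu_probe_speed(), PMC1 counts processor cycles while delay() spins
 * for roughly 100000us (0.1s), so multiplying the count by 10 yields
 * cycles per second.  The +4999 appears intended to make the later
 * "%u.%02u MHz" display, which works in 10kHz units, round to the nearest
 * value rather than truncate.
 */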

#if NSYSMON_ENVSYS > 0
const struct envsys_range cpu_tau_ranges[] = {
	{ 0, 0, ENVSYS_STEMP }
};

struct envsys_basic_info cpu_tau_info[] = {
	{ 0, ENVSYS_STEMP, "CPU temp", 0, 0, ENVSYS_FVALID }
};

void
cpu_tau_setup(struct cpu_info *ci)
{
	struct {
		struct sysmon_envsys sme;
		struct envsys_tre_data tau_info;
	} *datap;
	int error;

	datap = malloc(sizeof(*datap), M_DEVBUF, M_WAITOK | M_ZERO);

	ci->ci_sysmon_cookie = &datap->sme;
	datap->sme.sme_nsensors = 1;
	datap->sme.sme_envsys_version = 1000;
	datap->sme.sme_ranges = cpu_tau_ranges;
	datap->sme.sme_sensor_info = cpu_tau_info;
	datap->sme.sme_sensor_data = &datap->tau_info;

	datap->sme.sme_sensor_data->sensor = 0;
	datap->sme.sme_sensor_data->warnflags = ENVSYS_WARN_OK;
	datap->sme.sme_sensor_data->validflags = ENVSYS_FVALID|ENVSYS_FCURVALID;
	datap->sme.sme_cookie = ci;
	datap->sme.sme_gtredata = cpu_tau_gtredata;
	datap->sme.sme_streinfo = cpu_tau_streinfo;
	datap->sme.sme_flags = 0;

	if ((error = sysmon_envsys_register(&datap->sme)) != 0)
		aprint_error("%s: unable to register with sysmon (%d)\n",
		    ci->ci_dev->dv_xname, error);
}


/* Find the temperature of the CPU. */
int
cpu_tau_gtredata(struct sysmon_envsys *sme, struct envsys_tre_data *tred)
{
	int i, threshold, count;

	if (tred->sensor != 0) {
		tred->validflags = 0;
		return 0;
	}

	threshold = 64;	/* Half of the 7-bit sensor range */
	mtspr(SPR_THRM1, 0);
	mtspr(SPR_THRM2, 0);
	/* XXX This counter is supposed to be "at least 20 microseconds, in
	 * XXX units of clock cycles".  Since we don't have convenient
	 * XXX access to the CPU speed, set it to a conservative value that
	 * XXX assumes a fast (1GHz) G3 CPU (as of February 2002, the
	 * XXX fastest G3 processor is 700MHz).  The cost is that measuring
	 * XXX the temperature takes a bit longer.
	 */
	mtspr(SPR_THRM3, SPR_THRM_TIMER(20000) | SPR_THRM_ENABLE);

	/* Successive-approximation code adapted from Motorola
	 * application note AN1800/D, "Programming the Thermal Assist
	 * Unit in the MPC750 Microprocessor".
	 */
	for (i = 4; i >= 0; i--) {
		mtspr(SPR_THRM1,
		    SPR_THRM_THRESHOLD(threshold) | SPR_THRM_VALID);
		count = 0;
		while ((count < 100) &&
		    ((mfspr(SPR_THRM1) & SPR_THRM_TIV) == 0)) {
			count++;
			delay(1);
		}
		if (mfspr(SPR_THRM1) & SPR_THRM_TIN) {
			/* The interrupt bit was set, meaning the
			 * temperature was above the threshold
			 */
			threshold += 2 << i;
		} else {
			/* Temperature was below the threshold */
			threshold -= 2 << i;
		}
	}
	threshold += 2;

	/* Convert the temperature in degrees C to microkelvin */
	sme->sme_sensor_data->cur.data_us = (threshold * 1000000) + 273150000;

	*tred = *sme->sme_sensor_data;

	return 0;
}

int
cpu_tau_streinfo(struct sysmon_envsys *sme, struct envsys_basic_info *binfo)
{

	/* There is nothing to set here. */
	return (EINVAL);
}
#endif /* NSYSMON_ENVSYS > 0 */