/* $NetBSD: locore.h,v 1.97 2015/05/02 18:16:17 matt Exp $ */

/*
 * This file should not be included by MI code!!!
 */

/*
 * Copyright 1996 The Board of Trustees of The Leland Stanford
 * Junior University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this
 * software and its documentation for any purpose and without
 * fee is hereby granted, provided that the above copyright
 * notice appear in all copies. Stanford University
 * makes no representations about the suitability of this
 * software for any purpose. It is provided "as is" without
 * express or implied warranty.
 */

/*
 * Jump table for MIPS CPU locore functions that are implemented
 * differently on different generations, or instruction-set
 * architecture (ISA) levels, of the MIPS family.
 *
 * We currently provide support for MIPS I and MIPS III.
 */

#ifndef _MIPS_LOCORE_H
#define _MIPS_LOCORE_H

#if !defined(_LKM) && defined(_KERNEL_OPT)
#include "opt_cputype.h"
#endif

#include <mips/mutex.h>
#include <mips/cpuregs.h>
#include <mips/reg.h>

struct tlbmask;
struct trapframe;

void trap(uint32_t, uint32_t, vaddr_t, vaddr_t, struct trapframe *);
void ast(void);

void mips_fpu_trap(vaddr_t, struct trapframe *);
void mips_fpu_intr(vaddr_t, struct trapframe *);

vaddr_t mips_emul_branch(struct trapframe *, vaddr_t, uint32_t, bool);
void mips_emul_inst(uint32_t, uint32_t, vaddr_t, struct trapframe *);

void mips_emul_fp(uint32_t, struct trapframe *, uint32_t);
void mips_emul_branchdelayslot(uint32_t, struct trapframe *, uint32_t);

void mips_emul_lwc0(uint32_t, struct trapframe *, uint32_t);
void mips_emul_swc0(uint32_t, struct trapframe *, uint32_t);
void mips_emul_special(uint32_t, struct trapframe *, uint32_t);
void mips_emul_special3(uint32_t, struct trapframe *, uint32_t);

void mips_emul_lwc1(uint32_t, struct trapframe *, uint32_t);
void mips_emul_swc1(uint32_t, struct trapframe *, uint32_t);
void mips_emul_ldc1(uint32_t, struct trapframe *, uint32_t);
void mips_emul_sdc1(uint32_t, struct trapframe *, uint32_t);

void mips_emul_lb(uint32_t, struct trapframe *, uint32_t);
void mips_emul_lbu(uint32_t, struct trapframe *, uint32_t);
void mips_emul_lh(uint32_t, struct trapframe *, uint32_t);
void mips_emul_lhu(uint32_t, struct trapframe *, uint32_t);
void mips_emul_lw(uint32_t, struct trapframe *, uint32_t);
void mips_emul_lwl(uint32_t, struct trapframe *, uint32_t);
void mips_emul_lwr(uint32_t, struct trapframe *, uint32_t);
#if defined(__mips_n32) || defined(__mips_n64) || defined(__mips_o64)
void mips_emul_lwu(uint32_t, struct trapframe *, uint32_t);
void mips_emul_ld(uint32_t, struct trapframe *, uint32_t);
void mips_emul_ldl(uint32_t, struct trapframe *, uint32_t);
void mips_emul_ldr(uint32_t, struct trapframe *, uint32_t);
#endif
void mips_emul_sb(uint32_t, struct trapframe *, uint32_t);
void mips_emul_sh(uint32_t, struct trapframe *, uint32_t);
void mips_emul_sw(uint32_t, struct trapframe *, uint32_t);
void mips_emul_swl(uint32_t, struct trapframe *, uint32_t);
void mips_emul_swr(uint32_t, struct trapframe *, uint32_t);
#if defined(__mips_n32) || defined(__mips_n64) || defined(__mips_o64)
void mips_emul_sd(uint32_t, struct trapframe *, uint32_t);
void mips_emul_sdl(uint32_t, struct trapframe *, uint32_t);
void mips_emul_sdr(uint32_t, struct trapframe *, uint32_t);
#endif

uint32_t mips_cp0_cause_read(void);
void mips_cp0_cause_write(uint32_t);
uint32_t mips_cp0_status_read(void);
void mips_cp0_status_write(uint32_t);

void softint_process(uint32_t);
void softint_fast_dispatch(struct lwp *, int);

/*
 * Convert an address to the offset used in a MIPS jump instruction.  The
 * offset is the low 28 bits of the address (a jump can only reach within
 * the same 256MB segment of the address space), but since MIPS
 * instructions are always 4-byte aligned, the low 2 bits are always zero
 * and the 28 bits get shifted right by 2, leaving a 26-bit result.  To
 * build the offset, we shift left by 4 to discard the upper four bits and
 * then right by 6.
 */
#define fixup_addr2offset(x) ((((uint32_t)(uintptr_t)(x)) << 4) >> 6)
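/*
 * Worked example (illustrative arithmetic only): for a kernel text
 * address of 0x80234568, fixup_addr2offset() yields
 * ((0x80234568 << 4) >> 6) == ((0x80234568 & 0x0fffffff) >> 2) == 0x0008d15a,
 * which is the 26-bit target field of a j/jal instruction reaching that
 * address.
 */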
typedef bool (*mips_fixup_callback_t)(int32_t, uint32_t [2]);
struct mips_jump_fixup_info {
	uint32_t jfi_stub;
	uint32_t jfi_real;
};

void fixup_splcalls(void); /* splstubs.c */
bool mips_fixup_exceptions(mips_fixup_callback_t);
bool mips_fixup_zero_relative(int32_t, uint32_t [2]);
intptr_t
	mips_fixup_addr(const uint32_t *);
void mips_fixup_stubs(uint32_t *, uint32_t *);

/*
 * Define these stubs...
 */
void mips_cpu_switch_resume(struct lwp *);
void tlb_set_asid(uint32_t);
void tlb_invalidate_all(void);
void tlb_invalidate_globals(void);
void tlb_invalidate_asids(uint32_t, uint32_t);
void tlb_invalidate_addr(vaddr_t);
u_int tlb_record_asids(u_long *, uint32_t);
int tlb_update(vaddr_t, uint32_t);
void tlb_enter(size_t, vaddr_t, uint32_t);
void tlb_read_indexed(size_t, struct tlbmask *);
void tlb_write_indexed(size_t, const struct tlbmask *);
void wbflush(void);

#ifdef MIPS1
void mips1_tlb_invalidate_all(void);

uint32_t tx3900_cp0_config_read(void);
#endif

#if (MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
uint32_t mips3_cp0_compare_read(void);
void mips3_cp0_compare_write(uint32_t);

uint32_t mips3_cp0_config_read(void);
void mips3_cp0_config_write(uint32_t);

#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
uint32_t mipsNN_cp0_config1_read(void);
void mipsNN_cp0_config1_write(uint32_t);
uint32_t mipsNN_cp0_config2_read(void);
uint32_t mipsNN_cp0_config3_read(void);

intptr_t mipsNN_cp0_watchlo_read(u_int);
void mipsNN_cp0_watchlo_write(u_int, intptr_t);
uint32_t mipsNN_cp0_watchhi_read(u_int);
void mipsNN_cp0_watchhi_write(u_int, uint32_t);

#if (MIPS32R2 + MIPS64R2) > 0
void mipsNN_cp0_hwrena_write(uint32_t);
void mipsNN_cp0_userlocal_write(void *);
#endif
#endif

uint32_t mips3_cp0_count_read(void);
void mips3_cp0_count_write(uint32_t);

uint32_t mips3_cp0_wired_read(void);
void mips3_cp0_wired_write(uint32_t);
void mips3_cp0_pg_mask_write(uint32_t);

#if defined(__GNUC__)
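/*
 * Note on the o32 variants below: a 64-bit value lives in a register
 * pair and an o32 kernel only preserves the low 32 bits of each register
 * across an exception, so interrupts are blocked (Status IE cleared)
 * while the full 64-bit value is live in a register.
 */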
static inline uint64_t
mips3_ld(const volatile uint64_t *va)
{
	uint64_t rv;
#if defined(__mips_o32)
	uint32_t sr;

	sr = mips_cp0_status_read();
	mips_cp0_status_write(sr & ~MIPS_SR_INT_IE);

	__asm volatile(
		".set push \n\t"
		".set mips3 \n\t"
		".set noreorder \n\t"
		".set noat \n\t"
		"ld %M0,0(%1) \n\t"
		"dsll32 %L0,%M0,0 \n\t"
		"dsra32 %M0,%M0,0 \n\t" /* high word */
		"dsra32 %L0,%L0,0 \n\t" /* low word */
191 "ld %0,0(%1) \n\t"
192 ".set pop"
193 : "=d"(rv)
194 : "r"(va));
195
196 mips_cp0_status_write(sr);
197 #elif defined(_LP64)
198 rv = *va;
199 #else
200 __asm volatile("ld %0,0(%1)" : "=d"(rv) : "r"(va));
201 #endif
202
203 return rv;
204 }
205 static inline void
206 mips3_sd(volatile uint64_t *va, uint64_t v)
207 {
208 #if defined(__mips_o32)
209 uint32_t sr;
210
211 sr = mips_cp0_status_read();
212 mips_cp0_status_write(sr & ~MIPS_SR_INT_IE);
213
214 __asm volatile(
215 ".set push \n\t"
216 ".set mips3 \n\t"
217 ".set noreorder \n\t"
218 ".set noat \n\t"
219 "dsll32 %M0,%M0,0 \n\t"
220 "dsll32 %L0,%L0,0 \n\t"
221 "dsrl32 %L0,%L0,0 \n\t"
222 "or %0,%L0,%M0 \n\t"
223 "sd %0,0(%1) \n\t"
224 ".set pop"
225 : "=d"(v) : "0"(v), "r"(va));
226
227 mips_cp0_status_write(sr);
228 #elif defined(_LP64)
229 *va = v;
230 #else
231 __asm volatile("sd %0,0(%1)" :: "r"(v), "r"(va));
232 #endif
233 }
234 #else
235 uint64_t mips3_ld(volatile uint64_t *va);
236 void mips3_sd(volatile uint64_t *, uint64_t);
237 #endif /* __GNUC__ */
238 #endif /* (MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
239
240 #if (MIPS3 + MIPS4 + MIPS64 + MIPS64R2) > 0
241 static __inline uint32_t mips3_lw_a64(uint64_t addr) __unused;
242 static __inline void mips3_sw_a64(uint64_t addr, uint32_t val) __unused;
243
244 static __inline uint32_t
245 mips3_lw_a64(uint64_t addr)
246 {
247 uint32_t rv;
248 #if defined(__mips_o32)
249 uint32_t sr;
250
251 sr = mips_cp0_status_read();
252 mips_cp0_status_write((sr & ~MIPS_SR_INT_IE) | MIPS3_SR_KX);
253
254 __asm volatile (
255 ".set push \n\t"
256 ".set mips3 \n\t"
257 ".set noreorder \n\t"
258 ".set noat \n\t"
259 "dsll32 %M1,%M1,0 \n\t"
260 "dsll32 %L1,%L1,0 \n\t"
261 "dsrl32 %L1,%L1,0 \n\t"
262 "or %1,%M1,%L1 \n\t"
263 "lw %0, 0(%1) \n\t"
264 ".set pop"
265 : "=r"(rv), "=d"(addr)
266 : "1"(addr)
267 );
268
269 mips_cp0_status_write(sr);
270 #elif defined(__mips_n32)
271 uint32_t sr = mips_cp0_status_read();
272 mips_cp0_status_write((sr & ~MIPS_SR_INT_IE) | MIPS3_SR_KX);
273 __asm volatile("lw %0, 0(%1)" : "=r"(rv) : "d"(addr));
274 mips_cp0_status_write(sr);
275 #elif defined(_LP64)
276 rv = *(const uint32_t *)addr;
277 #else
278 #error unknown ABI
279 #endif
280 return (rv);
281 }
282
283 static __inline void
284 mips3_sw_a64(uint64_t addr, uint32_t val)
285 {
286 #if defined(__mips_o32)
287 uint32_t sr;
288
289 sr = mips_cp0_status_read();
290 mips_cp0_status_write((sr & ~MIPS_SR_INT_IE) | MIPS3_SR_KX);
291
292 __asm volatile (
293 ".set push \n\t"
294 ".set mips3 \n\t"
295 ".set noreorder \n\t"
296 ".set noat \n\t"
297 "dsll32 %M0,%M0,0 \n\t"
298 "dsll32 %L0,%L0,0 \n\t"
299 "dsrl32 %L0,%L0,0 \n\t"
300 "or %0,%M0,%L0 \n\t"
301 "sw %1, 0(%0) \n\t"
302 ".set pop"
303 : "=d"(addr): "r"(val), "0"(addr)
304 );
305
306 mips_cp0_status_write(sr);
307 #elif defined(__mips_n32)
308 uint32_t sr = mips_cp0_status_read();
309 mips_cp0_status_write((sr & ~MIPS_SR_INT_IE) | MIPS3_SR_KX);
310 __asm volatile("sw %1, 0(%0)" :: "d"(addr), "r"(val));
311 mips_cp0_status_write(sr);
312 #elif defined(_LP64)
313 *(uint32_t *)addr = val;
314 #else
315 #error unknown ABI
316 #endif
317 }
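/*
 * Illustrative use (the address below is an example, not a real device):
 * a 32-bit kernel can read a 32-bit register that sits above the 4GB
 * physical boundary through an XKPHYS uncached (CCA=2) address, e.g.
 *
 *	uint32_t v = mips3_lw_a64(0x9000000100000000ULL);
 *
 * The accessors set Status KX around the access (and block interrupts
 * on o32) so the 64-bit address is usable from kernel mode.
 */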
#endif /* (MIPS3 + MIPS4 + MIPS64 + MIPS64R2) > 0 */

#if (MIPS64 + MIPS64R2) > 0 && !defined(__mips_o32)
/* 64-bit address space accessors for the n32 and n64 ABIs */

static __inline uint64_t mips64_ld_a64(uint64_t addr) __unused;
static __inline void mips64_sd_a64(uint64_t addr, uint64_t val) __unused;

static __inline uint64_t
mips64_ld_a64(uint64_t addr)
{
	uint64_t rv;
#if defined(__mips_n32)
	__asm volatile("ld %0, 0(%1)" : "=r"(rv) : "d"(addr));
#elif defined(_LP64)
	rv = *(volatile uint64_t *)addr;
#else
#error unknown ABI
#endif
	return (rv);
}

static __inline void
mips64_sd_a64(uint64_t addr, uint64_t val)
{
#if defined(__mips_n32)
	__asm volatile("sd %1, 0(%0)" :: "d"(addr), "r"(val));
#elif defined(_LP64)
	*(volatile uint64_t *)addr = val;
#else
#error unknown ABI
#endif
}
#endif /* (MIPS64 + MIPS64R2) > 0 */

/*
 * A vector with an entry for each mips-ISA-level dependent
 * locore function, and macros which jump through it.
 */
typedef struct {
	void (*ljv_cpu_switch_resume)(struct lwp *);
	intptr_t ljv_lwp_trampoline;
	void (*ljv_wbflush)(void);
	void (*ljv_tlb_set_asid)(uint32_t pid);
	void (*ljv_tlb_invalidate_asids)(uint32_t, uint32_t);
	void (*ljv_tlb_invalidate_addr)(vaddr_t);
	void (*ljv_tlb_invalidate_globals)(void);
	void (*ljv_tlb_invalidate_all)(void);
	u_int (*ljv_tlb_record_asids)(u_long *, uint32_t);
	int (*ljv_tlb_update)(vaddr_t, uint32_t);
	void (*ljv_tlb_enter)(size_t, vaddr_t, uint32_t);
	void (*ljv_tlb_read_indexed)(size_t, struct tlbmask *);
	void (*ljv_tlb_write_indexed)(size_t, const struct tlbmask *);
} mips_locore_jumpvec_t;
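/*
 * In outline (not the literal implementation), the MD stubs declared
 * above dispatch through the active vector, e.g.
 *
 *	void
 *	tlb_invalidate_all(void)
 *	{
 *		(*mips_locore_jumpvec.ljv_tlb_invalidate_all)();
 *	}
 *
 * The vector is filled in at boot with the routines for the CPU that
 * was detected (see mips_vector_init()).
 */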

typedef struct {
	u_int (*lav_atomic_cas_uint)(volatile u_int *, u_int, u_int);
	u_long (*lav_atomic_cas_ulong)(volatile u_long *, u_long, u_long);
	int (*lav_ucas_uint)(volatile u_int *, u_int, u_int, u_int *);
	int (*lav_ucas_ulong)(volatile u_long *, u_long, u_long, u_long *);
	void (*lav_mutex_enter)(kmutex_t *);
	void (*lav_mutex_exit)(kmutex_t *);
	void (*lav_mutex_spin_enter)(kmutex_t *);
	void (*lav_mutex_spin_exit)(kmutex_t *);
} mips_locore_atomicvec_t;
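/*
 * These entries mirror the kernel's atomic_cas/ucas and mutex
 * operations; mips_llsc_locore_atomicvec (declared below) is the
 * ll/sc-based implementation that the active vector can be pointed at
 * on CPUs which support ll/sc.
 */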

void mips_set_wbflush(void (*)(void));
void mips_wait_idle(void);

void stacktrace(void);
void logstacktrace(void);

struct cpu_info;
struct splsw;

struct locoresw {
	void (*lsw_wbflush)(void);
	void (*lsw_cpu_idle)(void);
	int (*lsw_send_ipi)(struct cpu_info *, int);
	void (*lsw_cpu_offline_md)(void);
	void (*lsw_cpu_init)(struct cpu_info *);
	void (*lsw_cpu_run)(struct cpu_info *);
	int (*lsw_bus_error)(unsigned int);
};

struct mips_vmfreelist {
	paddr_t fl_start;
	paddr_t fl_end;
	int fl_freelist;
};

/*
 * The "active" locore-function and atomic-op vectors.
 */
extern const mips_locore_atomicvec_t mips_llsc_locore_atomicvec;

extern mips_locore_atomicvec_t mips_locore_atomicvec;
extern mips_locore_jumpvec_t mips_locore_jumpvec;
extern struct locoresw mips_locoresw;

struct splsw;
struct mips_vmfreelist;
struct phys_ram_seg;

void mips_vector_init(const struct splsw *, bool);
void mips_init_msgbuf(void);
void mips_init_lwp0_uarea(void);
void mips_page_physload(vaddr_t, vaddr_t,
	    const struct phys_ram_seg *, size_t,
	    const struct mips_vmfreelist *, size_t);


/*
 * CPU identification, from PRID register.
 */
#define MIPS_PRID_REV(x) (((x) >> 0) & 0x00ff)
#define MIPS_PRID_IMPL(x) (((x) >> 8) & 0x00ff)

/* pre-MIPS32/64 */
#define MIPS_PRID_RSVD(x) (((x) >> 16) & 0xffff)
#define MIPS_PRID_REV_MIN(x) ((MIPS_PRID_REV(x) >> 0) & 0x0f)
#define MIPS_PRID_REV_MAJ(x) ((MIPS_PRID_REV(x) >> 4) & 0x0f)

/* MIPS32/64 */
#define MIPS_PRID_CID(x) (((x) >> 16) & 0x00ff) /* Company ID */
#define MIPS_PRID_CID_PREHISTORIC 0x00 /* Not MIPS32/64 */
#define MIPS_PRID_CID_MTI 0x01 /* MIPS Technologies, Inc. */
#define MIPS_PRID_CID_BROADCOM 0x02 /* Broadcom */
#define MIPS_PRID_CID_ALCHEMY 0x03 /* Alchemy Semiconductor */
#define MIPS_PRID_CID_SIBYTE 0x04 /* SiByte */
#define MIPS_PRID_CID_SANDCRAFT 0x05 /* SandCraft */
#define MIPS_PRID_CID_PHILIPS 0x06 /* Philips */
#define MIPS_PRID_CID_TOSHIBA 0x07 /* Toshiba */
#define MIPS_PRID_CID_MICROSOFT 0x07 /* Microsoft also, sigh */
#define MIPS_PRID_CID_LSI 0x08 /* LSI */
				/* 0x09 unannounced */
				/* 0x0a unannounced */
#define MIPS_PRID_CID_LEXRA 0x0b /* Lexra */
#define MIPS_PRID_CID_RMI 0x0c /* RMI / NetLogic */
#define MIPS_PRID_CID_CAVIUM 0x0d /* Cavium */
#define MIPS_PRID_CID_INGENIC 0xe1
#define MIPS_PRID_COPTS(x) (((x) >> 24) & 0x00ff) /* Company Options */
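/*
 * Example decoding (the PRID value is illustrative): for a PRID of
 * 0x00019328,
 *	MIPS_PRID_CID(x)  == 0x01  (MIPS Technologies, Inc.)
 *	MIPS_PRID_IMPL(x) == 0x93
 *	MIPS_PRID_REV(x)  == 0x28  (MIPS_PRID_REV_MAJ 2, MIPS_PRID_REV_MIN 8)
 */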

#ifdef _KERNEL
/*
 * Global variables used to communicate CPU type, and parameters
 * such as cache size, from locore to higher-level code (e.g., pmap).
 */
void mips_pagecopy(void *dst, void *src);
void mips_pagezero(void *dst);

#ifdef __HAVE_MIPS_MACHDEP_CACHE_CONFIG
void mips_machdep_cache_config(void);
#endif

/*
 * trapframe argument passed to trap()
 */

#if 0
#define TF_AST 0 /* really zero */
#define TF_V0 _R_V0
#define TF_V1 _R_V1
#define TF_A0 _R_A0
#define TF_A1 _R_A1
#define TF_A2 _R_A2
#define TF_A3 _R_A3
#define TF_T0 _R_T0
#define TF_T1 _R_T1
#define TF_T2 _R_T2
#define TF_T3 _R_T3

#if defined(__mips_n32) || defined(__mips_n64)
#define TF_A4 _R_A4
#define TF_A5 _R_A5
#define TF_A6 _R_A6
#define TF_A7 _R_A7
#else
#define TF_T4 _R_T4
#define TF_T5 _R_T5
#define TF_T6 _R_T6
#define TF_T7 _R_T7
#endif /* __mips_n32 || __mips_n64 */

#define TF_TA0 _R_TA0
#define TF_TA1 _R_TA1
#define TF_TA2 _R_TA2
#define TF_TA3 _R_TA3

#define TF_T8 _R_T8
#define TF_T9 _R_T9

#define TF_RA _R_RA
#define TF_SR _R_SR
#define TF_MULLO _R_MULLO
#define TF_MULHI _R_MULHI
#define TF_EPC _R_PC /* may be changed by trap() call */

#define TF_NREGS (sizeof(struct reg) / sizeof(mips_reg_t))
#endif

struct trapframe {
	struct reg tf_registers;
#define tf_regs tf_registers.r_regs
	uint32_t tf_ppl; /* previous priority level */
	mips_reg_t tf_pad; /* for 8 byte alignment */
};

CTASSERT(sizeof(struct trapframe) % (4*sizeof(mips_reg_t)) == 0);

/*
 * Stack frame for kernel traps.  Four args are passed in registers.
 * A trapframe is pointed to by the 5th arg, and a dummy sixth argument
 * is used to avoid alignment problems.
 */

struct kernframe {
#if defined(__mips_o32) || defined(__mips_o64)
	register_t cf_args[4 + 1];
#if defined(__mips_o32)
	register_t cf_pad; /* (for 8 byte alignment) */
#endif
#endif
#if defined(__mips_n32) || defined(__mips_n64)
	register_t cf_pad[2]; /* for 16 byte alignment */
#endif
	register_t cf_sp;
	register_t cf_ra;
	struct trapframe cf_frame;
};

CTASSERT(sizeof(struct kernframe) % (2*sizeof(mips_reg_t)) == 0);

/*
 * PRocessor IDentity TABle
 */

struct pridtab {
	int cpu_cid;
	int cpu_pid;
	int cpu_rev; /* -1 == wildcard */
	int cpu_copts; /* -1 == wildcard */
	int cpu_isa; /* -1 == probed (mips32/mips64) */
	int cpu_ntlb; /* -1 == unknown, 0 == probed */
	int cpu_flags;
	u_int cpu_cp0flags; /* presence of some cp0 regs */
	u_int cpu_cidflags; /* company-specific flags */
	const char *cpu_name;
};

/*
 * bitfield defines for cpu_cp0flags
 */
#define MIPS_CP0FL_USE __BIT(0) /* use these flags */
#define MIPS_CP0FL_ECC __BIT(1)
#define MIPS_CP0FL_CACHE_ERR __BIT(2)
#define MIPS_CP0FL_EIRR __BIT(3)
#define MIPS_CP0FL_EIMR __BIT(4)
#define MIPS_CP0FL_EBASE __BIT(5)
#define MIPS_CP0FL_CONFIG __BIT(6)
#define MIPS_CP0FL_CONFIG1 __BIT(7)
#define MIPS_CP0FL_CONFIG2 __BIT(8)
#define MIPS_CP0FL_CONFIG3 __BIT(9)
#define MIPS_CP0FL_CONFIG4 __BIT(10)
#define MIPS_CP0FL_CONFIG5 __BIT(11)
#define MIPS_CP0FL_CONFIG6 __BIT(12)
#define MIPS_CP0FL_CONFIG7 __BIT(13)
#define MIPS_CP0FL_USERLOCAL __BIT(14)
#define MIPS_CP0FL_HWRENA __BIT(15)

/*
 * cpu_cidflags defines, by company
 */
/*
 * RMI company-specific cpu_cidflags
 */
#define MIPS_CIDFL_RMI_TYPE __BITS(2,0)
# define CIDFL_RMI_TYPE_XLR 0
# define CIDFL_RMI_TYPE_XLS 1
# define CIDFL_RMI_TYPE_XLP 2
#define MIPS_CIDFL_RMI_THREADS_MASK __BITS(6,3)
# define MIPS_CIDFL_RMI_THREADS_SHIFT 3
#define MIPS_CIDFL_RMI_CORES_MASK __BITS(10,7)
# define MIPS_CIDFL_RMI_CORES_SHIFT 7
# define LOG2_1 0
# define LOG2_2 1
# define LOG2_4 2
# define LOG2_8 3
# define MIPS_CIDFL_RMI_CPUS(ncores, nthreads) \
		((LOG2_ ## ncores << MIPS_CIDFL_RMI_CORES_SHIFT) \
		|(LOG2_ ## nthreads << MIPS_CIDFL_RMI_THREADS_SHIFT))
# define MIPS_CIDFL_RMI_NTHREADS(cidfl) \
		(1 << (((cidfl) & MIPS_CIDFL_RMI_THREADS_MASK) \
			>> MIPS_CIDFL_RMI_THREADS_SHIFT))
# define MIPS_CIDFL_RMI_NCORES(cidfl) \
		(1 << (((cidfl) & MIPS_CIDFL_RMI_CORES_MASK) \
			>> MIPS_CIDFL_RMI_CORES_SHIFT))
#define MIPS_CIDFL_RMI_L2SZ_MASK __BITS(14,11)
# define MIPS_CIDFL_RMI_L2SZ_SHIFT 11
# define RMI_L2SZ_256KB 0
# define RMI_L2SZ_512KB 1
# define RMI_L2SZ_1MB 2
# define RMI_L2SZ_2MB 3
# define RMI_L2SZ_4MB 4
# define MIPS_CIDFL_RMI_L2(l2sz) \
		(RMI_L2SZ_ ## l2sz << MIPS_CIDFL_RMI_L2SZ_SHIFT)
# define MIPS_CIDFL_RMI_L2SZ(cidfl) \
		((256*1024) << (((cidfl) & MIPS_CIDFL_RMI_L2SZ_MASK) \
			>> MIPS_CIDFL_RMI_L2SZ_SHIFT))
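/*
 * Example (illustrative values): MIPS_CIDFL_RMI_CPUS(8, 4) encodes
 * (LOG2_8 << 7) | (LOG2_4 << 3) == 0x190, from which
 * MIPS_CIDFL_RMI_NCORES() recovers 8 and MIPS_CIDFL_RMI_NTHREADS()
 * recovers 4.  Likewise MIPS_CIDFL_RMI_L2(1MB) encodes 0x1000, and
 * MIPS_CIDFL_RMI_L2SZ(0x1000) == 1MB (256KB << 2).
 */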

#endif /* _KERNEL */
#endif /* _MIPS_LOCORE_H */