/*	$NetBSD: psl.h,v 1.64 2023/09/02 05:51:57 jdc Exp $ */
2
3 /*
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This software was developed by the Computer Systems Engineering group
8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9 * contributed to Berkeley.
10 *
11 * All advertising materials mentioning features or use of this software
12 * must display the following acknowledgement:
13 * This product includes software developed by the University of
14 * California, Lawrence Berkeley Laboratory.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)psl.h 8.1 (Berkeley) 6/11/93
41 */
42
#ifndef PSR_IMPL

/*
 * SPARC Process Status Register (in psl.h for hysterical raisins).  This
 * doesn't exist on the V9.
 *
 * The picture in the Sun manuals looks like this:
 *	                                      1 1
 *	 31  28 27  24 23  20 19         14   3 2 11  8 7 6 5 4       0
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 *	| impl  |  ver  |  icc  |  reserved |E|E|  pil  |S|P|E|   CWP   |
 *	|       |       |n z v c|           |C|F|       | |S|T|         |
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 */

#define PSR_IMPL	0xf0000000	/* implementation */
#define PSR_VER		0x0f000000	/* version */
#define PSR_ICC		0x00f00000	/* integer condition codes */
#define PSR_N		0x00800000	/* negative */
#define PSR_Z		0x00400000	/* zero */
#define PSR_O		0x00200000	/* overflow */
#define PSR_C		0x00100000	/* carry */
#define PSR_EC		0x00002000	/* coprocessor enable */
#define PSR_EF		0x00001000	/* FP enable */
#define PSR_PIL		0x00000f00	/* interrupt level */
#define PSR_S		0x00000080	/* supervisor (kernel) mode */
#define PSR_PS		0x00000040	/* previous supervisor mode (traps) */
#define PSR_ET		0x00000020	/* trap enable */
#define PSR_CWP		0x0000001f	/* current window pointer */

/* snprintb(9)-style description of the single-bit PSR flags */
#define PSR_BITS "\20\16EC\15EF\10S\7PS\6ET"
74
/*
 * Interesting spl()s: the processor interrupt levels (1-15) at which the
 * various device classes run; consumed by the SPL()/SPLHOLD() generators
 * further down in this file.
 */
#define PIL_BIO		5
#define PIL_VIDEO	5
#define PIL_TTY		6
#define PIL_LPT		6
#define PIL_NET		6
#define PIL_VM		7
#define PIL_AUD		8
#define PIL_CLOCK	10
#define PIL_FD		11
#define PIL_SER		12
#define PIL_STATCLOCK	14
#define PIL_HIGH	15
#define PIL_SCHED	PIL_CLOCK	/* scheduler runs off the clock level */
#define PIL_LOCK	PIL_HIGH	/* lock level blocks everything */
90
/*
 * SPARC V9 CCR register: icc (32-bit results) in bits 0-3, xcc (64-bit
 * results) in bits 4-7.
 */

#define ICC_C	0x01L		/* carry */
#define ICC_V	0x02L		/* overflow */
#define ICC_Z	0x04L		/* zero */
#define ICC_N	0x08L		/* negative */
#define XCC_SHIFT	4	/* xcc nibble sits just above icc */
#define XCC_C	(ICC_C<<XCC_SHIFT)
#define XCC_V	(ICC_V<<XCC_SHIFT)
#define XCC_Z	(ICC_Z<<XCC_SHIFT)
#define XCC_N	(ICC_N<<XCC_SHIFT)

105
/*
 * SPARC V9 PSTATE register (what replaces the PSR in V9)
 *
 * Here's the layout:
 *
 *    11   10    9     8   7  6   5     4     3     2     1    0
 *  +------------------------------------------------------------+
 *  | IG | MG | CLE | TLE |  MM  | RED | PEF | AM | PRIV | IE | AG |
 *  +------------------------------------------------------------+
 */

#define PSTATE_IG	0x800	/* enable spitfire interrupt globals */
#define PSTATE_MG	0x400	/* enable spitfire MMU globals */
#define PSTATE_CLE	0x200	/* current little endian */
#define PSTATE_TLE	0x100	/* traps little endian */
#define PSTATE_MM	0x0c0	/* memory model (2-bit field, bits 6-7) */
#define PSTATE_MM_TSO	0x000	/* total store order */
#define PSTATE_MM_PSO	0x040	/* partial store order */
#define PSTATE_MM_RMO	0x080	/* Relaxed memory order */
#define PSTATE_RED	0x020	/* RED state */
#define PSTATE_PEF	0x010	/* enable floating point */
#define PSTATE_AM	0x008	/* 32-bit address masking */
#define PSTATE_PRIV	0x004	/* privileged mode */
#define PSTATE_IE	0x002	/* interrupt enable */
#define PSTATE_AG	0x001	/* enable alternate globals */

/* snprintb(9)-style description of the PSTATE bits */
#define PSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"

134
/*
 * 32-bit code requires TSO or at best PSO since that's what's supported on
 * SPARC V8 and earlier machines.
 *
 * 64-bit code sets the memory model in the ELF header.
 *
 * We're running kernel code in TSO for the moment so we don't need to worry
 * about possible memory barrier bugs.
 */

#ifdef __arch64__
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)		/* PROM callbacks */
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_PRIV|PSTATE_AG)	/* trap (nucleus) context */
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)		/* normal kernel */
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)			/* kernel, interrupts enabled */
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)	/* 32-bit user process */
#define PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)		/* 64-bit user process */
#else
/* 32-bit kernel: everything, including the kernel, runs with AM set */
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#endif

161
/*
 * SPARC V9 TSTATE register
 *
 *   39  32 31  24 23  20 19     8 7  5 4   0
 *  +------+------+------+--------+----+-----+
 *  | CCR  | ASI  |  -   | PSTATE | -  | CWP |
 *  +------+------+------+--------+----+-----+
 */

#define TSTATE_CWP		0x01f
#define TSTATE_PSTATE		0xfff00
#define TSTATE_PSTATE_SHIFT	8
#define TSTATE_ASI		0xff000000LL
#define TSTATE_ASI_SHIFT	24
#define TSTATE_CCR		0xff00000000LL
#define TSTATE_CCR_SHIFT	32

/*
 * The PSR keeps the icc bits at 20-23; TSTATE keeps them (low nibble of
 * CCR) at 32-35 -- hence the shift by TSTATE_CCR_SHIFT-20 == 12.
 */
#define PSRCC_TO_TSTATE(x)	(((int64_t)(x)&PSR_ICC)<<(TSTATE_CCR_SHIFT-20))
#define TSTATECCR_TO_PSR(x)	(((x)&TSTATE_CCR)>>(TSTATE_CCR_SHIFT-20))

/*
 * These are here to simplify life.
 */
#define TSTATE_IG	(PSTATE_IG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MG	(PSTATE_MG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_CLE	(PSTATE_CLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_TLE	(PSTATE_TLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM	(PSTATE_MM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_TSO	(PSTATE_MM_TSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_PSO	(PSTATE_MM_PSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_RMO	(PSTATE_MM_RMO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_RED	(PSTATE_RED<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AM	(PSTATE_AM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)
#define TSTATE_IE	(PSTATE_IE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AG	(PSTATE_AG<<TSTATE_PSTATE_SHIFT)

/*
 * NOTE(review): same bit names as PSTATE_BITS, i.e. this describes the
 * PSTATE field after it has been shifted down by TSTATE_PSTATE_SHIFT --
 * confirm against the callers that print it.
 */
#define TSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"

#define TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
#define TSTATE_USER	((PSTATE_USER)<<TSTATE_PSTATE_SHIFT)
/*
 * SPARC V9 VER version register.
 *
 *  63    48 47   32 31   24 23 16 15     8 7 5 4      0
 * +-------+-------+-------+-----+--------+---+--------+
 * | manuf | impl  | mask  |  -  | maxtl  | - | maxwin |
 * +-------+-------+-------+-----+--------+---+--------+
 *
 */

#define VER_MANUF	0xffff000000000000LL	/* JEDEC manufacturer code */
#define VER_MANUF_SHIFT	48
#define VER_IMPL	0x0000ffff00000000LL	/* implementation number */
#define VER_IMPL_SHIFT	32
#define VER_MASK	0x00000000ff000000LL	/* mask-set (chip revision) */
#define VER_MASK_SHIFT	24
#define VER_MAXTL	0x000000000000ff00LL	/* maximum trap level */
#define VER_MAXTL_SHIFT	8
#define VER_MAXWIN	0x000000000000001fLL	/* index of last register window */

/* Known values for the manuf field */
#define MANUF_FUJITSU	0x04	/* Fujitsu SPARC64 */
#define MANUF_SUN	0x17	/* Sun UltraSPARC */

/* Known values for the impl field */
#define IMPL_SPARC64		0x01	/* SPARC64 */
#define IMPL_SPARC64_II		0x02	/* SPARC64-II */
#define IMPL_SPARC64_III	0x03	/* SPARC64-III */
#define IMPL_SPARC64_IV		0x04	/* SPARC64-IV */
#define IMPL_ZEUS		0x05	/* SPARC64-V */
#define IMPL_OLYMPUS_C		0x06	/* SPARC64-VI */
#define IMPL_JUPITER		0x07	/* SPARC64-VII */

#define IMPL_SPITFIRE		0x10	/* UltraSPARC-I */
#define IMPL_BLACKBIRD		0x11	/* UltraSPARC-II */
#define IMPL_SABRE		0x12	/* UltraSPARC-IIi */
#define IMPL_HUMMINGBIRD	0x13	/* UltraSPARC-IIe */
#define IMPL_CHEETAH		0x14	/* UltraSPARC-III */
#define IMPL_CHEETAH_PLUS	0x15	/* UltraSPARC-III+ */
#define IMPL_JALAPENO		0x16	/* UltraSPARC-IIIi */
#define IMPL_JAGUAR		0x18	/* UltraSPARC-IV */
#define IMPL_PANTHER		0x19	/* UltraSPARC-IV+ */
#define IMPL_SERRANO		0x22	/* UltraSPARC-IIIi+ */
245
/*
 * Here are a few things to help us transition between user and kernel mode:
 */

/* Memory models: kernel runs TSO, 64-bit userland gets RMO */
#define KERN_MM		PSTATE_MM_TSO
#define USER_MM		PSTATE_MM_RMO

/*
 * Register window handlers.  These point to generic routines that check the
 * stack pointer and then vector to the real handler.  We could optimize this
 * if we could guarantee only 32-bit or 64-bit stacks.
 *
 * NOTE(review): values are octal; presumably they encode the WSTATE
 * other/normal subfields used by the spill/fill trap vectors -- confirm
 * against the locore trap-table code.
 */
#define WSTATE_KERN	026
#define WSTATE_USER	022

#define CWP		0x01f	/* mask for the 5-bit current window pointer */
263
/*
 * UltraSPARC Ancillary State Registers.
 *
 * These expand to assembler register names; they are used both from
 * assembly and as the "reg" argument of the accessor generators below.
 */
#define SET_SOFTINT	%asr20	/* Set Software Interrupt register bits */
#define CLEAR_SOFTINT	%asr21	/* Clear Software Interrupt register bits */
#define SOFTINT		%asr22	/* Software Interrupt register */
#define TICK_CMPR	%asr23	/* TICK Compare register */
#define STICK		%asr24	/* STICK register */
#define STICK_CMPR	%asr25	/* STICK Compare register */

/* SOFTINT bit descriptions */
#define TICK_INT	0x01		/* CPU clock timer interrupt */
#define STICK_INT	(0x1<<16)	/* system clock timer interrupt */

/* 64-byte alignment -- this seems the best place to put this. */
#define SPARC64_BLOCK_SIZE	64
#define SPARC64_BLOCK_ALIGN	0x3f

282
#if (defined(_KERNEL) || defined(_KMEMUSER)) && !defined(_LOCORE)
/* Interrupt priority level, and the cookie consumed by splraiseipl(). */
typedef uint8_t ipl_t;
typedef struct {
	ipl_t _ipl;	/* the PIL to raise to */
} ipl_cookie_t;
#endif /* _KERNEL|_KMEMUSER&!_LOCORE */
289
#if defined(_KERNEL) && !defined(_LOCORE)

#if defined(_KERNEL_OPT)
#include "opt_sparc_arch.h"
#endif

/*
 * Put "memory" to asm inline on sun4v to avoid issuing rdpr %ver
 * before checking cputyp as a result of code moving by compiler
 * optimization.
 *
 * (Clobber list used by the SPARC64_RDCONST* generators below.)
 */
#ifdef SUN4V
#define constasm_clobbers	"memory"
#else
#define constasm_clobbers
#endif
306
/*
 * Inlines for manipulating privileged and ancillary state registers.
 *
 * Each generator expands to one small accessor:
 *	rd/wr	read/write mnemonic (rd, rdpr, wr, wrpr)
 *	name	suffix of the generated get##name()/set##name()
 *	reg	register operand, e.g. %pstate or an ASR alias above
 *	type	C type read or written
 *
 * The RDCONST variant is for registers whose value never changes, so the
 * asm may be CSE'd (__constfunc, non-volatile asm).
 */
#define SPARC64_RDCONST_DEF(rd, name, reg, type) \
static __inline __constfunc type get##name(void) \
{ \
	type _val; \
	__asm(#rd " %" #reg ",%0" : "=r" (_val) : : constasm_clobbers); \
	return _val; \
}
/* Volatile variant: the register may change between reads. */
#define SPARC64_RD_DEF(rd, name, reg, type) \
static __inline type get##name(void) \
{ \
	type _val; \
	__asm volatile(#rd " %" #reg ",%0" : "=r" (_val)); \
	return _val; \
}
#define SPARC64_WR_DEF(wr, name, reg, type) \
static __inline void set##name(type _val) \
{ \
	__asm volatile(#wr " %0,0,%" #reg : : "r" (_val) : "memory"); \
}
329
#ifdef __arch64__
/* LP64: a 64-bit register value fits in a single C register. */
#define SPARC64_RDCONST64_DEF(rd, name, reg) \
	SPARC64_RDCONST_DEF(rd, name, reg, uint64_t)
#define SPARC64_RD64_DEF(rd, name, reg)	SPARC64_RD_DEF(rd, name, reg, uint64_t)
#define SPARC64_WR64_DEF(wr, name, reg)	SPARC64_WR_DEF(wr, name, reg, uint64_t)
#else
/* ILP32: shuffle the 64-bit register value through two 32-bit halves. */
#define SPARC64_RDCONST64_DEF(rd, name, reg) \
static __inline __constfunc uint64_t get##name(void) \
{ \
	uint32_t _hi, _lo; \
	__asm(#rd " %" #reg ",%0; srl %0,0,%1; srlx %0,32,%0" \
		: "=r" (_hi), "=r" (_lo) : : constasm_clobbers); \
	return ((uint64_t)_hi << 32) | _lo; \
}
#define SPARC64_RD64_DEF(rd, name, reg) \
static __inline uint64_t get##name(void) \
{ \
	uint32_t _hi, _lo; \
	__asm volatile(#rd " %" #reg ",%0; srl %0,0,%1; srlx %0,32,%0" \
		: "=r" (_hi), "=r" (_lo)); \
	return ((uint64_t)_hi << 32) | _lo; \
}
#define SPARC64_WR64_DEF(wr, name, reg) \
static __inline void set##name(uint64_t _val) \
{ \
	uint32_t _hi = _val >> 32, _lo = _val; \
	__asm volatile("sllx %1,32,%0; or %0,%2,%0; " #wr " %0,0,%" #reg\
		: "=&r" (_hi) /* scratch register */ \
		: "r" (_hi), "r" (_lo) : "memory"); \
}
#endif
361
/* Convenience wrappers: privileged registers (rdpr/wrpr) and ASRs (rd/wr). */
#define SPARC64_RDPR_DEF(name, reg, type)	SPARC64_RD_DEF(rdpr, name, reg, type)
#define SPARC64_WRPR_DEF(name, reg, type)	SPARC64_WR_DEF(wrpr, name, reg, type)
#define SPARC64_RDPR64_DEF(name, reg)	SPARC64_RD64_DEF(rdpr, name, reg)
#define SPARC64_WRPR64_DEF(name, reg)	SPARC64_WR64_DEF(wrpr, name, reg)
#define SPARC64_RDASR64_DEF(name, reg)	SPARC64_RD64_DEF(rd, name, reg)
#define SPARC64_WRASR64_DEF(name, reg)	SPARC64_WR64_DEF(wr, name, reg)

/* Tick Register (PR 4) */
SPARC64_RDPR64_DEF(tick, %tick)			/* gettick() */
SPARC64_WRPR64_DEF(tick, %tick)			/* settick() */

/* Processor State Register (PR 6) */
SPARC64_RDPR_DEF(pstate, %pstate, int)		/* getpstate() */
SPARC64_WRPR_DEF(pstate, %pstate, int)		/* setpstate() */

/* Trap Level Register (PR 7) */
SPARC64_RDPR_DEF(tl, %tl, int)			/* gettl() */

/* Current Window Pointer Register (PR 9) */
SPARC64_RDPR_DEF(cwp, %cwp, int)		/* getcwp() */
SPARC64_WRPR_DEF(cwp, %cwp, int)		/* setcwp() */

/* Version Register (PR 31) */
SPARC64_RDCONST64_DEF(rdpr, ver, %ver)		/* getver() */

/* System Tick Register (ASR 24) */
SPARC64_RDASR64_DEF(stick, STICK)		/* getstick() */
SPARC64_WRASR64_DEF(stick, STICK)		/* setstick() */

/* System Tick Compare Register (ASR 25) */
SPARC64_RDASR64_DEF(stickcmpr, STICK_CMPR)	/* getstickcmpr() */
393
/*
 * Some simple macros to check the cpu type: extract the VER register
 * fields defined above and compare against the MANUF_*/IMPL_* tables.
 */
#define GETVER_CPU_MASK()	((getver() & VER_MASK) >> VER_MASK_SHIFT)
#define GETVER_CPU_IMPL()	((getver() & VER_IMPL) >> VER_IMPL_SHIFT)
#define GETVER_CPU_MANUF()	((getver() & VER_MANUF) >> VER_MANUF_SHIFT)
#define CPU_IS_SPITFIRE()	(GETVER_CPU_IMPL() == IMPL_SPITFIRE)
#define CPU_IS_HUMMINGBIRD()	(GETVER_CPU_IMPL() == IMPL_HUMMINGBIRD)
#define CPU_IS_USIIIi()		((GETVER_CPU_IMPL() == IMPL_JALAPENO) || \
				 (GETVER_CPU_IMPL() == IMPL_SERRANO))
#define CPU_IS_USIII_UP()	(GETVER_CPU_IMPL() >= IMPL_CHEETAH)
#define CPU_IS_SPARC64_V_UP()	(GETVER_CPU_MANUF() == MANUF_FUJITSU && \
				 GETVER_CPU_IMPL() >= IMPL_ZEUS)
405
406 static __inline int
407 intr_disable(void)
408 {
409 int pstate = getpstate();
410
411 setpstate(pstate & ~PSTATE_IE);
412 return pstate;
413 }
414
/* Restore the %pstate value previously returned by intr_disable(). */
static __inline void
intr_restore(int pstate)
{
	setpstate(pstate);
}
420
421 /*
422 * GCC pseudo-functions for manipulating PIL
423 */
424
425 #ifdef SPLDEBUG
426 void prom_printf(const char *fmt, ...);
427 extern int printspl;
428 #define SPLPRINT(x) \
429 { \
430 if (printspl) { \
431 int i = 10000000; \
432 prom_printf x ; \
433 while (i--) \
434 ; \
435 } \
436 }
437 #define SPL(name, newpil) \
438 static __inline int name##X(const char* file, int line) \
439 { \
440 int oldpil; \
441 __asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
442 SPLPRINT(("{%s:%d %d=>%d}", file, line, oldpil, newpil)); \
443 __asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
444 return (oldpil); \
445 }
446 /* A non-priority-decreasing version of SPL */
447 #define SPLHOLD(name, newpil) \
448 static __inline int name##X(const char* file, int line) \
449 { \
450 int oldpil; \
451 __asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
452 if (newpil <= oldpil) \
453 return oldpil; \
454 SPLPRINT(("{%s:%d %d->!d}", file, line, oldpil, newpil)); \
455 __asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
456 return (oldpil); \
457 }
458
#else
#define SPLPRINT(x)
/*
 * SPL(name, newpil): generate an inline name() that unconditionally sets
 * %pil to newpil and returns the old level.  The "n" constraint requires
 * newpil to be a compile-time constant.
 */
#define	SPL(name, newpil) \
static __inline __always_inline int name(void) \
{ \
	int oldpil; \
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil); \
}
/* A non-priority-decreasing version of SPL */
#define	SPLHOLD(name, newpil) \
static __inline __always_inline int name(void) \
{ \
	int oldpil; \
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil)); \
	if (newpil <= oldpil) \
		return oldpil; \
	__asm volatile("wrpr %%g0,%0,%%pil" : : "n" (newpil) : "memory"); \
	return (oldpil); \
}
#endif
481
482 static __inline ipl_cookie_t
483 makeiplcookie(ipl_t ipl)
484 {
485
486 return (ipl_cookie_t){._ipl = ipl};
487 }
488
/*
 * Raise %pil to the level stored in the cookie, never lowering it, and
 * return the previous level (for a later splx()).
 */
static __inline int __attribute__((__unused__))
splraiseipl(ipl_cookie_t icookie)
{
	int newpil = icookie._ipl;
	int oldpil;

	/*
	 * NetBSD/sparc64's IPL_* constants equate directly to the
	 * corresponding PIL_* names; no need to map them here.
	 */
	__asm volatile("rdpr %%pil,%0" : "=r" (oldpil));
	if (newpil <= oldpil)
		return (oldpil);
	/* "r" constraint here: newpil is a runtime value, unlike SPL() */
	__asm volatile("wrpr %0,0,%%pil" : : "r" (newpil) : "memory");
	return (oldpil);
}
505
/* The spl*() entry points actually generated by this header. */
SPL(spl0, 0)

SPLHOLD(splsoftint, 1)
#define splsoftclock	splsoftint
#define splsoftnet	splsoftint

SPLHOLD(splsoftserial, 4)

/*
 * Memory allocation (must be as high as highest network, tty, or disk device)
 */
SPLHOLD(splvm, PIL_VM)

SPLHOLD(splsched, PIL_SCHED)

SPLHOLD(splhigh, PIL_HIGH)
522
523 /* splx does not have a return value */
524 #ifdef SPLDEBUG
525 #define spl0() spl0X(__FILE__, __LINE__)
526 #define splsoftint() splsoftintX(__FILE__, __LINE__)
527 #define splsoftserial() splsoftserialX(__FILE__, __LINE__)
528 #define splausoft() splausoftX(__FILE__, __LINE__)
529 #define splfdsoft() splfdsoftX(__FILE__, __LINE__)
530 #define splvm() splvmX(__FILE__, __LINE__)
531 #define splclock() splclockX(__FILE__, __LINE__)
532 #define splfd() splfdX(__FILE__, __LINE__)
533 #define splzs() splzsX(__FILE__, __LINE__)
534 #define splserial() splzerialX(__FILE__, __LINE__)
535 #define splaudio() splaudioX(__FILE__, __LINE__)
536 #define splstatclock() splstatclockX(__FILE__, __LINE__)
537 #define splsched() splschedX(__FILE__, __LINE__)
538 #define spllock() spllockX(__FILE__, __LINE__)
539 #define splhigh() splhighX(__FILE__, __LINE__)
540 #define splx(x) splxX((x),__FILE__, __LINE__)
541
542 static __inline void splxX(int newpil, const char *file, int line)
543 #else
544 static __inline __always_inline void splx(int newpil)
545 #endif
546 {
547 #ifdef SPLDEBUG
548 int pil;
549
550 __asm volatile("rdpr %%pil,%0" : "=r" (pil));
551 SPLPRINT(("{%d->%d}", pil, newpil));
552 #endif
553 __asm volatile("wrpr %%g0,%0,%%pil" : : "rn" (newpil) : "memory");
554 }
555 #endif /* KERNEL && !_LOCORE */
556
557 #endif /* PSR_IMPL */
558