/*	$NetBSD: psl.h,v 1.53 2024/04/07 17:08:00 rillig Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)psl.h	8.1 (Berkeley) 6/11/93
 */

#ifndef PSR_IMPL

/*
 * SPARC Process Status Register (in psl.h for hysterical raisins).  This
 * doesn't exist on the V9.
 *
 * The picture in the Sun manuals looks like this:
 *	                                     1 1
 *	 31   28 27   24 23   20 19       14 3 2 11    8 7 6 5 4       0
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 *	|  impl |  ver  |  icc  |  reserved |E|E|  pil  |S|P|E|   CWP   |
 *	|       |       |n z v c|           |C|F|       | |S|T|         |
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 */

#define PSR_IMPL	0xf0000000	/* implementation */
#define PSR_VER		0x0f000000	/* version */
#define PSR_ICC		0x00f00000	/* integer condition codes */
#define PSR_N		0x00800000	/* negative */
#define PSR_Z		0x00400000	/* zero */
#define PSR_O		0x00200000	/* overflow */
#define PSR_C		0x00100000	/* carry */
#define PSR_EC		0x00002000	/* coprocessor enable */
#define PSR_EF		0x00001000	/* FP enable */
#define PSR_PIL		0x00000f00	/* interrupt level */
#define PSR_S		0x00000080	/* supervisor (kernel) mode */
#define PSR_PS		0x00000040	/* previous supervisor mode (traps) */
#define PSR_ET		0x00000020	/* trap enable */
#define PSR_CWP		0x0000001f	/* current window pointer */

#define PSR_BITS "\20\16EC\15EF\10S\7PS\6ET"
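
/*
 * Illustrative sketch (not part of the original header): decoding the
 * fields above from a raw %psr value.  The PIL occupies bits 8-11, so it
 * is shifted down after masking; single-bit flags only need the mask.
 * The example_* names are hypothetical.
 */
#if 0	/* example only */
static __inline int
example_psr_pil(int psr)
{
	return (psr & PSR_PIL) >> 8;	/* current interrupt level, 0-15 */
}

static __inline int
example_psr_is_kernel(int psr)
{
	return (psr & PSR_S) != 0;	/* supervisor bit set? */
}
#endif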

/*
 * SPARC V9 CCR register
 */

#define ICC_C	0x01L
#define ICC_V	0x02L
#define ICC_Z	0x04L
#define ICC_N	0x08L
#define XCC_SHIFT	4
#define XCC_C	(ICC_C<<XCC_SHIFT)
#define XCC_V	(ICC_V<<XCC_SHIFT)
#define XCC_Z	(ICC_Z<<XCC_SHIFT)
#define XCC_N	(ICC_N<<XCC_SHIFT)
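
/*
 * Illustrative sketch (not part of the original header): the V9 CCR holds
 * two copies of the condition codes, icc (32-bit results) in bits 0-3 and
 * xcc (64-bit results) in bits 4-7, hence XCC_SHIFT of 4 above; e.g.
 * XCC_N == ICC_N << 4 == 0x80.  The example_* name is hypothetical.
 */
#if 0	/* example only */
static __inline int
example_ccr_negative(int ccr, int is64)
{
	return (ccr & (is64 ? XCC_N : ICC_N)) != 0;
}
#endif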


/*
 * SPARC V9 PSTATE register (what replaces the PSR in V9)
 *
 * Here's the layout:
 *
 *    11   10    9     8   7  6   5     4     3     2     1   0
 *  +------------------------------------------------------------+
 *  | IG | MG | CLE | TLE | MM | RED | PEF | AM | PRIV | IE | AG |
 *  +------------------------------------------------------------+
 */

#define PSTATE_IG	0x800	/* enable spitfire interrupt globals */
#define PSTATE_MG	0x400	/* enable spitfire MMU globals */
#define PSTATE_CLE	0x200	/* current little endian */
#define PSTATE_TLE	0x100	/* traps little endian */
#define PSTATE_MM	0x0c0	/* memory model */
#define PSTATE_MM_TSO	0x000	/* total store order */
#define PSTATE_MM_PSO	0x040	/* partial store order */
#define PSTATE_MM_RMO	0x080	/* relaxed memory order */
#define PSTATE_RED	0x020	/* RED state */
#define PSTATE_PEF	0x010	/* enable floating point */
#define PSTATE_AM	0x008	/* 32-bit address masking */
#define PSTATE_PRIV	0x004	/* privileged mode */
#define PSTATE_IE	0x002	/* interrupt enable */
#define PSTATE_AG	0x001	/* enable alternate globals */

#define PSTATE_BITS "\177\020"						\
	"b\013IG\0"	"b\012MG\0"	"b\011CLE\0"	"b\010TLE\0"	\
			"F\006\002\0"	":\000MM_TSO\0"	":\001MM_PSO\0"	\
	":\002MM_RMO\0"	"*?\0"		"b\005RED\0"	"b\004PEF\0"	\
	"b\003AM\0"	"b\002PRIV\0"	"b\001IE\0"	"b\000AG\0"
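
/*
 * Illustrative sketch (not part of the original header): PSTATE_BITS is a
 * new-style snprintb(3) format string, so a PSTATE value can be rendered
 * symbolically for debugging.  The example_* name is hypothetical and the
 * exact output string is only indicative.
 */
#if 0	/* example only */
static void
example_print_pstate(uint64_t pstate)
{
	char bits[64];

	snprintb(bits, sizeof(bits), PSTATE_BITS, pstate);
	printf("pstate=%s\n", bits);	/* e.g. "0x6<MM_TSO,PRIV,IE>" */
}
#endif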

/*
 * 32-bit code requires TSO, or at weakest PSO, since that's all that
 * SPARC V8 and earlier machines support.
 *
 * 64-bit code sets the memory model in the ELF header.
 *
 * We're running kernel code in TSO for the moment, so we don't need to
 * worry about possible memory barrier bugs.
 */

#ifdef __arch64__
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)
#else
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#endif
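
/*
 * Illustrative sanity checks (not part of the original header), spelling
 * out two of the composed constants above.  PSTATE_MM_TSO is 0, so on a
 * 64-bit kernel PSTATE_KERN reduces to just PSTATE_PRIV, while
 * PSTATE_USER runs RMO with interrupts enabled.
 */
#if 0	/* example only */
#ifdef __arch64__
__CTASSERT(PSTATE_KERN == 0x004);	/* TSO(0) | PRIV(0x004) */
__CTASSERT(PSTATE_USER == 0x082);	/* RMO(0x080) | IE(0x002) */
#endif
#endif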


/*
 * SPARC V9 TSTATE register
 *
 *   39 32 31 24 23 20  19   8	7 5 4   0
 *  +-----+-----+-----+--------+---+-----+
 *  | CCR | ASI |  -  | PSTATE | - | CWP |
 *  +-----+-----+-----+--------+---+-----+
 */

#define TSTATE_CWP		0x01f
#define TSTATE_PSTATE		0xfff00
#define TSTATE_PSTATE_SHIFT	8
#define TSTATE_ASI		0xff000000LL
#define TSTATE_ASI_SHIFT	24
#define TSTATE_CCR		0xff00000000LL
#define TSTATE_CCR_SHIFT	32

#define PSRCC_TO_TSTATE(x)	(((int64_t)(x)&PSR_ICC)<<(TSTATE_CCR_SHIFT-20))
#define TSTATECCR_TO_PSR(x)	(((x)&TSTATE_CCR)>>(TSTATE_CCR_SHIFT-20))
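
/*
 * Illustrative worked example (not part of the original header): the PSR
 * keeps icc in bits 20-23 while TSTATE keeps CCR in bits 32-39, so the
 * icc bits move up by TSTATE_CCR_SHIFT - 20 == 12.  A PSR with N and Z
 * set (0x00c00000) thus lands in the icc half of the TSTATE CCR field.
 */
#if 0	/* example only */
__CTASSERT(PSRCC_TO_TSTATE(PSR_N | PSR_Z) == 0x000c00000000LL);
#endif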

/*
 * These are here to simplify life.
 */
#define TSTATE_IG	(PSTATE_IG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MG	(PSTATE_MG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_CLE	(PSTATE_CLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_TLE	(PSTATE_TLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM	(PSTATE_MM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_TSO	(PSTATE_MM_TSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_PSO	(PSTATE_MM_PSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_RMO	(PSTATE_MM_RMO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_RED	(PSTATE_RED<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AM	(PSTATE_AM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)
#define TSTATE_IE	(PSTATE_IE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AG	(PSTATE_AG<<TSTATE_PSTATE_SHIFT)

#define TSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"

#define TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
#define TSTATE_USER	((PSTATE_USER)<<TSTATE_PSTATE_SHIFT)
/*
 * SPARC V9 VER version register.
 *
 *  63   48 47  32 31  24 23 16 15    8 7 5 4      0
 * +-------+------+------+-----+-------+---+--------+
 * | manuf | impl | mask |  -  | maxtl | - | maxwin |
 * +-------+------+------+-----+-------+---+--------+
 *
 */

#define VER_MANUF	0xffff000000000000LL
#define VER_MANUF_SHIFT	48
#define VER_IMPL	0x0000ffff00000000LL
#define VER_IMPL_SHIFT	32
#define VER_MASK	0x00000000ff000000LL
#define VER_MASK_SHIFT	24
#define VER_MAXTL	0x000000000000ff00LL
#define VER_MAXTL_SHIFT	8
#define VER_MAXWIN	0x000000000000001fLL
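
/*
 * Illustrative sketch (not part of the original header): pulling fields
 * out of a %ver value with the masks above.  maxwin is the number of
 * register windows minus one.  The example_* names are hypothetical.
 */
#if 0	/* example only */
static __inline int
example_ver_maxwin(uint64_t ver)
{
	return (int)(ver & VER_MAXWIN);		/* NWINDOWS - 1 */
}

static __inline int
example_ver_maxtl(uint64_t ver)
{
	return (int)((ver & VER_MAXTL) >> VER_MAXTL_SHIFT);
}
#endif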

/*
 * Here are a few things to help us transition between user and kernel mode:
 */

/* Memory models */
#define KERN_MM		PSTATE_MM_TSO
#define USER_MM		PSTATE_MM_RMO

/*
 * Register window handlers.  These point to generic routines that check the
 * stack pointer and then vector to the real handler.  We could optimize this
 * if we could guarantee only 32-bit or 64-bit stacks.
 */
#define WSTATE_KERN	026
#define WSTATE_USER	022

#define CWP		0x01f

/* 64-byte alignment -- this seems the best place to put this. */
#define SPARC64_BLOCK_SIZE	64
#define SPARC64_BLOCK_ALIGN	0x3f
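
/*
 * Illustrative sketch (not part of the original header): rounding an
 * address up to the next 64-byte block boundary with the mask above.
 * The example_* name is hypothetical.
 */
#if 0	/* example only */
static __inline vaddr_t
example_block_round(vaddr_t va)
{
	return (va + SPARC64_BLOCK_ALIGN) & ~(vaddr_t)SPARC64_BLOCK_ALIGN;
}
#endif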

#if (defined(_KERNEL) || defined(_KMEMUSER)) && !defined(_LOCORE)
typedef uint8_t ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;
#endif	/* _KERNEL|_KMEMUSER & !_LOCORE */

#if defined(_KERNEL) && !defined(_LOCORE)

/*
 * GCC pseudo-functions for manipulating PSR (primarily the PIL field).
 */
static __inline __attribute__((__always_inline__)) int
getpsr(void)
{
	int psr;

	__asm volatile("rd %%psr,%0" : "=r" (psr));
	return (psr);
}

static __inline __attribute__((__always_inline__)) int
getmid(void)
{
	int mid;

	/* Extract a 2-bit CPU module ID from %tbr. */
	__asm volatile("rd %%tbr,%0" : "=r" (mid));
	return ((mid >> 20) & 0x3);
}

static __inline __attribute__((__always_inline__)) void
setpsr(int newpsr)
{
	__asm volatile("wr %0,0,%%psr" : : "r" (newpsr) : "memory");
	__asm volatile("nop; nop; nop");
}

static __inline __attribute__((__always_inline__)) void
spl0(void)
{
	int psr, oldipl;

	/*
	 * wrpsr xors two values: we choose old psr and old ipl here,
	 * which gives us the same value as the old psr but with all
	 * the old PIL bits turned off.
	 */
	__asm volatile("rd %%psr,%0" : "=r" (psr) : : "memory");
	oldipl = psr & PSR_PIL;
	__asm volatile("wr %0,%1,%%psr" : : "r" (psr), "r" (oldipl));

	/*
	 * Three instructions must execute before we can depend
	 * on the bits to be changed.
	 */
	__asm volatile("nop; nop; nop");
}

/*
 * PIL 1 through 14 can use this macro.
 * (spl0 and splhigh are special since they put all 0s or all 1s
 * into the ipl field.)
 */
#define	_SPLSET(name, newipl) \
static __inline __attribute__((__always_inline__)) void name(void) \
{ \
	int psr; \
	__asm volatile("rd %%psr,%0" : "=r" (psr)); \
	psr &= ~PSR_PIL; \
	__asm volatile("wr %0,%1,%%psr" : : \
	    "r" (psr), "n" ((newipl) << 8)); \
	__asm volatile("nop; nop; nop" : : : "memory"); \
}

_SPLSET(spllowerschedclock, IPL_SCHED)
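
/*
 * Illustrative expansion (not part of the original header): the
 * instantiation above produces, in effect, the following function.
 * With the PIL field cleared first, the xor performed by wrpsr simply
 * deposits the new level.
 */
#if 0	/* example only */
static __inline __attribute__((__always_inline__)) void
spllowerschedclock_expanded(void)
{
	int psr;

	__asm volatile("rd %%psr,%0" : "=r" (psr));
	psr &= ~PSR_PIL;
	__asm volatile("wr %0,%1,%%psr" : : "r" (psr), "n" (IPL_SCHED << 8));
	__asm volatile("nop; nop; nop" : : : "memory");
}
#endif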

static __inline __always_inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}

/* Raise IPL and return previous value */
static __inline __always_inline int
splraiseipl(ipl_cookie_t icookie)
{
	int newipl = icookie._ipl;
	int psr, oldipl;

	__asm volatile("rd %%psr,%0" : "=r" (psr));

	oldipl = psr & PSR_PIL;
	newipl <<= 8;
	if (newipl <= oldipl)
		return (oldipl);

	psr = (psr & ~oldipl) | newipl;

	__asm volatile("wr %0,0,%%psr" : : "r" (psr));
	__asm volatile("nop; nop; nop" : : : "memory");

	return (oldipl);
}

#include <sys/spl.h>

#define	splausoft()	splraiseipl(makeiplcookie(IPL_SOFTAUDIO))
#define	splfdsoft()	splraiseipl(makeiplcookie(IPL_SOFTFDC))

#define	splfd()		splraiseipl(makeiplcookie(IPL_FD))
#define	splts102()	splraiseipl(makeiplcookie(IPL_TS102))

#define	splzs()		splraiseipl(makeiplcookie(IPL_ZS))

/* splx does not have a return value */
static __inline __attribute__((__always_inline__)) void
splx(int newipl)
{
	int psr;

	__asm volatile("rd %%psr,%0" : "=r" (psr) : : "memory");
	__asm volatile("wr %0,%1,%%psr" : :
	    "r" (psr & ~PSR_PIL), "rn" (newipl));
	__asm volatile("nop; nop; nop");
}
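
/*
 * Illustrative usage sketch (not part of the original header): the usual
 * raise/restore pairing with the primitives above.  splraiseipl() (via
 * the splzs() macro) returns the old PIL field, which splx() restores.
 */
#if 0	/* example only */
	int s;

	s = splzs();		/* raise to IPL_ZS, remember old PIL */
	/* ... touch interrupt-protected state ... */
	splx(s);		/* restore the previous PIL */
#endif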
#endif /* _KERNEL && !_LOCORE */

#endif /* PSR_IMPL */